Diffstat (limited to 'playbooks')
265 files changed, 5093 insertions, 5260 deletions
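A recurring cleanup in the adhoc playbook hunks below is worth calling out: Ansible's `when:` already evaluates a raw Jinja2 expression, so wrapping the variable in `{{ }}` is redundant and triggers deprecation warnings on newer Ansible releases. A minimal sketch of the corrected pattern, using the variable names from `create_pv.yaml`:

```yaml
- fail:
    msg: "This playbook requires {{ item }} to be set."
  # Bare Jinja2 expression: no surrounding {{ }} inside `when`
  when: item is not defined or item == ''
  with_items:
    - cli_volume_size
    - cli_device_name
```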
diff --git a/playbooks/README.md b/playbooks/README.md index 5857a9f59..290d4c082 100644 --- a/playbooks/README.md +++ b/playbooks/README.md @@ -12,8 +12,6 @@ And: - [`adhoc`](adhoc) is a generic home for playbooks and tasks that are community supported and not officially maintained. -- [`aws`](aws), [`gce`](gce), [`libvirt`](libvirt) and [`openstack`](openstack) - are related to the [`bin/cluster`](../bin) tool and its usage is deprecated. Refer to the `README.md` file in each playbook directory for more information about them. diff --git a/playbooks/adhoc/contiv/delete_contiv.yml b/playbooks/adhoc/contiv/delete_contiv.yml index 91948c72e..eec6c23a7 100644 --- a/playbooks/adhoc/contiv/delete_contiv.yml +++ b/playbooks/adhoc/contiv/delete_contiv.yml @@ -1,5 +1,5 @@ --- -- name: delete contiv +- name: Uninstall contiv hosts: all gather_facts: False tasks: diff --git a/playbooks/adhoc/create_pv/create_pv.yaml b/playbooks/adhoc/create_pv/create_pv.yaml index 81c1ee653..64f861c6a 100644 --- a/playbooks/adhoc/create_pv/create_pv.yaml +++ b/playbooks/adhoc/create_pv/create_pv.yaml @@ -20,7 +20,7 @@ pre_tasks: - fail: msg: "This playbook requires {{item}} to be set." - when: "{{ item }} is not defined or {{ item }} == ''" + when: item is not defined or item == '' with_items: - cli_volume_size - cli_device_name diff --git a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml index f638fab83..507ac0f05 100644 --- a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml +++ b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml @@ -33,7 +33,7 @@ pre_tasks: - fail: msg: "This playbook requires {{item}} to be set." - when: "{{ item }} is not defined or {{ item }} == ''" + when: item is not defined or item == '' with_items: - cli_tag_name - cli_volume_size diff --git a/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml index d988a28b0..3059d3dc5 100755 --- a/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml +++ b/playbooks/adhoc/docker_loopback_to_lvm/ops-docker-loopback-to-direct-lvm.yml @@ -24,7 +24,7 @@ pre_tasks: - fail: msg: "This playbook requires {{item}} to be set." - when: "{{ item }} is not defined or {{ item }} == ''" + when: item is not defined or item == '' with_items: - cli_docker_device diff --git a/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml b/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml index b6dde357e..5e12cd181 100644 --- a/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml +++ b/playbooks/adhoc/docker_storage_cleanup/docker_storage_cleanup.yml @@ -25,7 +25,7 @@ - fail: msg: "This playbook requires {{item}} to be set." 
- when: "{{ item }} is not defined or {{ item }} == ''" + when: item is not defined or item == '' with_items: - cli_tag_name diff --git a/playbooks/adhoc/grow_docker_vg/filter_plugins/grow_docker_vg_filters.py b/playbooks/adhoc/grow_docker_vg/filter_plugins/grow_docker_vg_filters.py index daff68fbe..cacd0b0f3 100644 --- a/playbooks/adhoc/grow_docker_vg/filter_plugins/grow_docker_vg_filters.py +++ b/playbooks/adhoc/grow_docker_vg/filter_plugins/grow_docker_vg_filters.py @@ -1,6 +1,5 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# vim: expandtab:tabstop=4:shiftwidth=4 ''' Custom filters for use in openshift-ansible ''' diff --git a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml index 598f1966d..eb8440d1b 100644 --- a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml +++ b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml @@ -42,7 +42,7 @@ pre_tasks: - fail: msg: "This playbook requires {{item}} to be set." - when: "{{ item }} is not defined or {{ item }} == ''" + when: item is not defined or item == '' with_items: - cli_tag_name - cli_volume_size diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml index ffdcd0ce1..58b3a7835 100644 --- a/playbooks/adhoc/uninstall.yml +++ b/playbooks/adhoc/uninstall.yml @@ -26,6 +26,20 @@ - hosts: nodes become: yes tasks: + - name: Remove dnsmasq dispatcher + file: + path: "{{ item }}" + state: absent + with_items: + - /etc/dnsmasq.d/origin-dns.conf + - /etc/dnsmasq.d/origin-upstream-dns.conf + - /etc/dnsmasq.d/openshift-ansible.conf + - /etc/NetworkManager/dispatcher.d/99-origin-dns.sh + when: openshift_use_dnsmasq | default(true) | bool + - service: + name: NetworkManager + state: restarted + when: openshift_use_dnsmasq | default(true) | bool - name: Stop services service: name={{ item }} state=stopped with_items: @@ -103,7 +117,7 @@ - atomic-openshift-sdn-ovs - cockpit-bridge - cockpit-docker - - cockpit-shell + - cockpit-system - cockpit-ws - kubernetes-client - openshift @@ -125,7 +139,7 @@ - name: Remove flannel package package: name=flannel state=absent when: openshift_use_flannel | default(false) | bool - when: "{{ not is_atomic | bool }}" + when: not is_atomic | bool - shell: systemctl reset-failed changed_when: False @@ -146,7 +160,7 @@ - lbr0 - vlinuxbr - vovsbr - when: "{{ openshift_remove_all | default(true) | bool }}" + when: openshift_remove_all | default(true) | bool - shell: atomic uninstall "{{ item }}"-master-api changed_when: False @@ -239,7 +253,7 @@ changed_when: False failed_when: False with_items: "{{ images_to_delete.results }}" - when: "{{ openshift_uninstall_images | default(True) | bool }}" + when: openshift_uninstall_images | default(True) | bool - name: remove sdn drop files file: @@ -252,7 +266,7 @@ - /etc/sysconfig/openshift-node - /etc/sysconfig/openvswitch - /run/openshift-sdn - when: "{{ openshift_remove_all | default(True) | bool }}" + when: openshift_remove_all | default(True) | bool - find: path={{ item }} file_type=file register: files @@ -279,9 +293,6 @@ with_items: - /etc/ansible/facts.d/openshift.fact - /etc/atomic-enterprise - - /etc/dnsmasq.d/origin-dns.conf - - /etc/dnsmasq.d/origin-upstream-dns.conf - - /etc/NetworkManager/dispatcher.d/99-origin-dns.sh - /etc/openshift - /etc/openshift-sdn - /etc/sysconfig/atomic-enterprise-node @@ -305,11 +316,19 @@ - shell: systemctl daemon-reload changed_when: False + - name: restart container-engine + service: name=container-engine state=restarted + failed_when: false + register: container_engine + - 
name: restart docker service: name=docker state=restarted - - - name: restart NetworkManager - service: name=NetworkManager state=restarted + failed_when: false + when: not (container_engine | changed) + register: l_docker_restart_docker_in_pb_result + until: not l_docker_restart_docker_in_pb_result | failed + retries: 3 + delay: 30 - hosts: masters become: yes @@ -339,7 +358,7 @@ - atomic-openshift-master - cockpit-bridge - cockpit-docker - - cockpit-shell + - cockpit-system - cockpit-ws - corosync - kubernetes-client @@ -386,10 +405,19 @@ - "{{ directories.results | default([]) }}" - files + - set_fact: + client_users: "{{ [ansible_ssh_user, 'root'] | unique }}" + + - name: Remove client kubeconfigs + file: + path: "~{{ item }}/.kube" + state: absent + with_items: + - "{{ client_users }}" + - name: Remove remaining files file: path={{ item }} state=absent with_items: - - "~{{ ansible_ssh_user }}/.kube" - /etc/ansible/facts.d/openshift.fact - /etc/atomic-enterprise - /etc/corosync @@ -414,7 +442,6 @@ - /etc/sysconfig/origin-master - /etc/sysconfig/origin-master-api - /etc/sysconfig/origin-master-controllers - - /root/.kube - /usr/share/openshift/examples - /var/lib/atomic-enterprise - /var/lib/openshift diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md index 99698b4d0..0fb29ca06 100644 --- a/playbooks/aws/README.md +++ b/playbooks/aws/README.md @@ -1,4 +1,268 @@ # AWS playbooks -This playbook directory is meant to be driven by [`bin/cluster`](../../bin), -which is community supported and most use is considered deprecated. +## Provisioning + +In response to recent demand for provisioning from customers and developers alike, the AWS + playbook directory now supports a limited set of Ansible playbooks that achieve a + complete cluster setup. These playbooks are aimed at deploying highly scalable + OpenShift clusters using AWS auto scaling groups and custom AMIs. + +### Where do I start? + +Before any provisioning may occur, AWS account credentials must be present in the environment. This can be done in two ways: + +- Create the file `~/.aws/credentials` with the following contents (substitute your own access key and secret key): + ``` + [myaccount] + aws_access_key_id = <Your access_key here> + aws_secret_access_key = <Your secret access key here> + ``` + From the shell: + ``` + $ export AWS_PROFILE=myaccount + ``` + --- +- Alternatively, instead of using a profile, you can export your AWS credentials as environment variables. + ``` + $ export AWS_ACCESS_KEY_ID=AKIXXXXXX + $ export AWS_SECRET_ACCESS_KEY=XXXXXX + ``` + +### Let's Provision! + +The newly added playbooks are the following: +- build_ami.yml +- provision.yml +- provision_nodes.yml + +The expected workflow is to populate the `vars.yml` file with the +desired settings for the cluster instances. These settings are AWS specific and should +be tailored to your AWS account.
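Before filling in `vars.yml`, it can help to confirm that the exported credentials resolve to the intended account. A quick, optional check, assuming the AWS CLI is installed locally (the playbooks themselves do not require it):

```
$ aws sts get-caller-identity
```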
+ +```yaml +clusterid: mycluster +region: us-east-1 + +provision: + clusterid: "{{ clusterid }}" + region: "{{ region }}" + + build: + base_image: ami-bdd5d6ab # base image for AMI to build from + # when creating an encrypted AMI please specify use_encryption + use_encryption: False + + # for s3 registry backend + openshift_registry_s3: True + + # if using custom certificates these are required for the ELB + iam_cert_ca: + name: test_openshift + cert_path: '/path/to/wildcard.<clusterid>.example.com.crt' + key_path: '/path/to/wildcard.<clusterid>.example.com.key' + chain_path: '/path/to/cert.ca.crt' + + instance_users: + - key_name: myuser_key + username: myuser + pub_key: | + ssh-rsa aaa<place public ssh key here>aaaaa user@<clusterid> + + node_group_config: + tags: + clusterid: "{{ clusterid }}" + environment: stg + ssh_key_name: myuser_key # name of the ssh key from above + + # configure master settings here + master: + instance_type: m4.xlarge + ami: ami-cdeec8b6 # if using an encrypted AMI this will be replaced + volumes: + - device_name: /dev/sdb + volume_size: 100 + device_type: gp2 + delete_on_termination: False + health_check: + period: 60 + type: EC2 + # Set the following number to be the same for masters. + min_size: 3 + max_size: 3 + desired_size: 3 + tags: + host-type: master + sub-host-type: default + wait_for_instances: True +... + vpc: + # name: mycluster # If missing; will default to clusterid + cidr: 172.31.0.0/16 + subnets: + us-east-1: # These are us-east-1 region defaults. Ensure this matches your region + - cidr: 172.31.48.0/20 + az: "us-east-1c" + - cidr: 172.31.32.0/20 + az: "us-east-1e" + - cidr: 172.31.16.0/20 + az: "us-east-1a" + +``` + +Repeat the setup above for the infra and compute node groups. These most likely + will not need editing, but if the installation requires further customization these parameters + can be updated. + +#### Step 1 + +Create an openshift-ansible inventory file to use for a BYO installation. The difference here is that no hosts are specified in the inventory file. Here is an example: + +```ini +[OSEv3:children] +masters +nodes +etcd + +[OSEv3:vars] +################################################################################ +# Ensure these variables are set for bootstrap +################################################################################ +openshift_master_bootstrap_enabled=True + +openshift_hosted_router_wait=False +openshift_hosted_registry_wait=False + +# Repository for installation +openshift_additional_repos=[{'name': 'openshift-repo', 'id': 'openshift-repo', 'baseurl': 'https://mirror.openshift.com/enterprise/enterprise-3.6/latest/x86_64/os/', 'enabled': 'yes', 'gpgcheck': 0, 'sslverify': 'no', 'sslclientcert': '/var/lib/yum/client-cert.pem', 'sslclientkey': '/var/lib/yum/client-key.pem', 'gpgkey': 'https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-release https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-beta https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-openshifthosted'}] + +################################################################################ +# cluster specific settings may be placed here + +[masters] + +[etcd] + +[nodes] +``` + +There are more examples of cluster inventory settings [`here`](../../inventory/byo/). + +This inventory is what allows the bootstrappable AMI to be created: it lets the openshift-ansible node roles configure the image in the next step.
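Before building anything, an optional syntax check against the host-less inventory can catch problems early; this assumes the inventory file is saved as `inventory.yml`, matching the command shown in Step 2:

```
$ ansible-playbook -i inventory.yml build_ami.yml --syntax-check
```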
+ + +#### Step 2 + +Once `vars.yml` has been updated with the correct settings for the desired AWS account, we are ready to build an AMI. + +``` +$ ansible-playbook -i inventory.yml build_ami.yml +``` + +This playbook will: +1. Build a VPC. The default name will be the clusterid if not specified. +2. Create the ssh key required for the instance. +3. Create an instance. +4. Run setup roles to ensure packages and services are correctly configured. +5. Create the AMI. +6. If encryption is desired: + - a KMS key is created with the name of $clusterid + - an encrypted AMI will be produced with the $clusterid KMS key +7. Terminate the instance used to configure the AMI. + + +#### Step 3 + +Now that we have created an AMI for our OpenShift installation, that AMI id needs to be placed in the `vars.yml` file. To do so, update the following fields (the AMI id can be captured from the output of the previous step or found in the EC2 console under AMIs): + +``` + # when creating an encrypted AMI please specify use_encryption + use_encryption: False # defaults to false +``` + +**Note**: If using encryption, specify `use_encryption: True`. This takes the recently created AMI and encrypts it for later use. If encryption is not desired, leave the value set to false. The AMI id will be fetched and used according to its most recent creation date. + + +#### Step 4 + +We are ready to create the master instances and install OpenShift. + +``` +$ ansible-playbook -i <inventory from step 1> provision.yml +``` + +This playbook runs through the following steps: +1. Ensures a VPC is created +2. Ensures an SSH key exists +3. Creates an s3 bucket for the registry named $clusterid +4. Creates the master security groups +5. Creates a master launch config +6. Creates the master auto scaling groups +7. Uploads certificates for the ELB, if desired +8. Creates the internal and external master ELBs +9. Adds the newly created masters to the correct groups +10. Sets a couple of important facts for the masters +11. Runs the [`byo`](../../common/openshift-cluster/config.yml) install + +At this point we have a working cluster consisting of only the master nodes. + + +#### Step 5 + +Now that the cluster is deployed, we can create the remaining node types. This is done with the following playbook: + +``` +$ ansible-playbook provision_nodes.yml +``` + +Once this playbook completes, the compute and infra node scale groups are created. The new nodes will attempt to register themselves with the cluster; these requests must be approved by an administrator. + +#### Step 6 + +Node registration can be automated with the `accept.yml` playbook, which can handle the registration in a few different ways: +- approve_all - **Note**: this option is intended for development and test environments only; security is bypassed +- nodes - a list of node names that will be accepted into the cluster + +```yaml + oc_adm_csr: + #approve_all: True + nodes: < list of nodes here > + timeout: 0 +``` +Once the desired accept method is chosen, run the `accept.yml` playbook: 
+``` +$ ansible-playbook accept.yml +``` + +Login to a master and run the following command: +``` +ssh root@<master ip address> +$ oc --config=/etc/origin/master/admin.kubeconfig get csr +node-bootstrapper-client-ip-172-31-49-148-ec2-internal 1h system:serviceaccount:openshift-infra:node-bootstrapper Approved,Issued +node-bootstrapper-server-ip-172-31-49-148-ec2-internal 1h system:node:ip-172-31-49-148.ec2.internal Approved,Issued +``` + +Verify the `CONDITION` is `Approved,Issued` on the `csr` objects. There are two for each node required. +1. `node-bootstrapper-client` is a request to access the api/controllers. +2. `node-bootstrapper-server` is a request to join the cluster. + +Once this is complete, verify the nodes have joined the cluster and are `ready`. + +``` +$ oc --config=/etc/origin/master/admin.kubeconfig get nodes +NAME STATUS AGE VERSION +ip-172-31-49-148.ec2.internal Ready 1h v1.6.1+5115d708d7 +``` + +### Ready To Work! + +At this point your cluster should be ready for workloads. Proceed to deploy applications on your cluster. + +### Still to compute + +There are more enhancements that are arriving for provisioning. These will include more playbooks that enhance the provisioning capabilities. diff --git a/playbooks/aws/openshift-cluster/accept.yml b/playbooks/aws/openshift-cluster/accept.yml new file mode 100755 index 000000000..d43c84205 --- /dev/null +++ b/playbooks/aws/openshift-cluster/accept.yml @@ -0,0 +1,48 @@ +--- +- name: Setup the vpc and the master node group + #hosts: oo_first_master + hosts: localhost + remote_user: root + gather_facts: no + tasks: + - name: get provisioning vars + include_vars: vars.yml + + - name: bring lib_openshift into scope + include_role: + name: lib_openshift + + - name: fetch masters + ec2_remote_facts: + region: "{{ provision.region }}" + filters: + "tag:clusterid": "{{ provision.clusterid }}" + "tag:host-type": master + instance-state-name: running + register: mastersout + retries: 20 + delay: 3 + until: "'instances' in mastersout and mastersout.instances|length > 0" + + - name: fetch new node instances + ec2_remote_facts: + region: "{{ provision.region }}" + filters: + "tag:clusterid": "{{ provision.clusterid }}" + "tag:host-type": node + instance-state-name: running + register: instancesout + retries: 20 + delay: 3 + until: "'instances' in instancesout and instancesout.instances|length > 0" + + - debug: + msg: "{{ instancesout.instances|map(attribute='private_dns_name') | list | regex_replace('.ec2.internal') }}" + + - name: approve nodes + oc_adm_csr: + #approve_all: True + nodes: "{{ instancesout.instances|map(attribute='private_dns_name') | list | regex_replace('.ec2.internal') }}" + timeout: 0 + register: nodeout + delegate_to: "{{ mastersout.instances[0].public_ip_address }}" diff --git a/playbooks/aws/openshift-cluster/add_nodes.yml b/playbooks/aws/openshift-cluster/add_nodes.yml deleted file mode 100644 index 0e8eb90c1..000000000 --- a/playbooks/aws/openshift-cluster/add_nodes.yml +++ /dev/null @@ -1,35 +0,0 @@ ---- -- name: Launch instance(s) - hosts: localhost - connection: local - become: no - gather_facts: no - vars_files: - - vars.yml - vars: - oo_extend_env: True - tasks: - - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml - vars: - type: "compute" - count: "{{ num_nodes }}" - - include: tasks/launch_instances.yml - vars: - instances: "{{ node_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - g_sub_host_type: "{{ sub_host_type }}" - - - include: 
../../common/openshift-cluster/tasks/set_node_launch_facts.yml - vars: - type: "infra" - count: "{{ num_infra }}" - - include: tasks/launch_instances.yml - vars: - instances: "{{ node_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - g_sub_host_type: "{{ sub_host_type }}" - -- include: scaleup.yml -- include: list.yml diff --git a/playbooks/aws/openshift-cluster/build_ami.yml b/playbooks/aws/openshift-cluster/build_ami.yml new file mode 100644 index 000000000..d27874200 --- /dev/null +++ b/playbooks/aws/openshift-cluster/build_ami.yml @@ -0,0 +1,150 @@ +--- +- hosts: localhost + connection: local + gather_facts: no + tasks: + - name: get the necessary vars for ami building + include_vars: vars.yml + + - name: create a vpc with the name <clusterid> + include_role: + name: openshift_aws_vpc + vars: + r_openshift_aws_vpc_clusterid: "{{ provision.clusterid }}" + r_openshift_aws_vpc_cidr: "{{ provision.vpc.cidr }}" + r_openshift_aws_vpc_subnets: "{{ provision.vpc.subnets }}" + r_openshift_aws_vpc_region: "{{ provision.region }}" + r_openshift_aws_vpc_tags: "{{ provision.vpc.tags }}" + r_openshift_aws_vpc_name: "{{ provision.vpc.name | default(provision.clusterid) }}" + + - name: create aws ssh keypair + include_role: + name: openshift_aws_ssh_keys + vars: + r_openshift_aws_ssh_keys_users: "{{ provision.instance_users }}" + r_openshift_aws_ssh_keys_region: "{{ provision.region }}" + + - name: fetch the default subnet id + ec2_vpc_subnet_facts: + region: "{{ provision.region }}" + filters: + "tag:Name": "{{ provision.vpc.subnets[provision.region][0].az }}" + register: subnetout + + - name: create instance for ami creation + ec2: + assign_public_ip: yes + region: "{{ provision.region }}" + key_name: "{{ provision.node_group_config.ssh_key_name }}" + group: "{{ provision.clusterid }}" + instance_type: m4.xlarge + vpc_subnet_id: "{{ subnetout.subnets[0].id }}" + image: "{{ provision.build.base_image }}" + volumes: + - device_name: /dev/sdb + volume_type: gp2 + volume_size: 100 + delete_on_termination: true + wait: yes + exact_count: 1 + count_tag: + Name: ami_base + instance_tags: + Name: ami_base + register: amibase + + - name: wait for ssh to become available + wait_for: + port: 22 + host: "{{ amibase.tagged_instances.0.public_ip }}" + timeout: 300 + search_regex: OpenSSH + + - name: add host to nodes + add_host: + groups: nodes + name: "{{ amibase.tagged_instances.0.public_dns_name }}" + + - name: set the user to perform installation + set_fact: + ansible_ssh_user: root + +- name: normalize groups + include: ../../byo/openshift-cluster/initialize_groups.yml + +- name: run the std_include + include: ../../common/openshift-cluster/evaluate_groups.yml + +- name: run the std_include + include: ../../common/openshift-cluster/initialize_facts.yml + +- name: run the std_include + include: ../../common/openshift-cluster/initialize_openshift_repos.yml + +- hosts: nodes + remote_user: root + tasks: + - name: get the necessary vars for ami building + include_vars: vars.yml + + - set_fact: + openshift_node_bootstrap: True + + - name: run openshift image preparation + include_role: + name: openshift_node + +- hosts: localhost + connection: local + become: no + tasks: + - name: bundle ami + ec2_ami: + instance_id: "{{ amibase.tagged_instances.0.id }}" + region: "{{ provision.region }}" + state: present + description: "This was provisioned {{ ansible_date_time.iso8601 }}" + name: "{{ provision.build.ami_name }}{{ lookup('pipe', 'date +%Y%m%d%H%M')}}" + tags: "{{ 
provision.build.openshift_ami_tags }}" + wait: yes + register: amioutput + + - debug: var=amioutput + + - when: provision.build.use_encryption | default(False) + block: + - name: setup kms key for encryption + include_role: + name: openshift_aws_iam_kms + vars: + r_openshift_aws_iam_kms_region: "{{ provision.region }}" + r_openshift_aws_iam_kms_alias: "alias/{{ provision.clusterid }}_kms" + + - name: augment the encrypted ami tags with source-ami + set_fact: + source_tag: + source-ami: "{{ amioutput.image_id }}" + + - name: copy the ami for encrypted disks + include_role: + name: openshift_aws_ami_copy + vars: + r_openshift_aws_ami_copy_region: "{{ provision.region }}" + r_openshift_aws_ami_copy_name: "{{ provision.build.ami_name }}{{ lookup('pipe', 'date +%Y%m%d%H%M')}}-encrypted" + r_openshift_aws_ami_copy_src_ami: "{{ amioutput.image_id }}" + r_openshift_aws_ami_copy_kms_alias: "alias/{{ provision.clusterid }}_kms" + r_openshift_aws_ami_copy_tags: "{{ source_tag | combine(provision.build.openshift_ami_tags) }}" + r_openshift_aws_ami_copy_encrypt: "{{ provision.build.use_encryption }}" + # this option currently fails due to boto waiters + # when supported this need to be reapplied + #r_openshift_aws_ami_copy_wait: True + + - name: Display newly created encrypted ami id + debug: + msg: "{{ r_openshift_aws_ami_copy_retval_custom_ami }}" + + - name: terminate temporary instance + ec2: + state: absent + region: "{{ provision.region }}" + instance_ids: "{{ amibase.tagged_instances.0.id }}" diff --git a/playbooks/aws/openshift-cluster/build_node_group.yml b/playbooks/aws/openshift-cluster/build_node_group.yml new file mode 100644 index 000000000..3ef492238 --- /dev/null +++ b/playbooks/aws/openshift-cluster/build_node_group.yml @@ -0,0 +1,47 @@ +--- +- name: fetch recently created AMI + ec2_ami_find: + region: "{{ provision.region }}" + sort: creationDate + sort_order: descending + name: "{{ provision.build.ami_name }}*" + ami_tags: "{{ provision.build.openshift_ami_tags }}" + #no_result_action: fail + register: amiout + +- block: + - name: "Create {{ openshift_build_node_type }} sgs" + include_role: + name: openshift_aws_sg + vars: + r_openshift_aws_sg_clusterid: "{{ provision.clusterid }}" + r_openshift_aws_sg_region: "{{ provision.region }}" + r_openshift_aws_sg_type: "{{ openshift_build_node_type }}" + + - name: "generate a launch config name for {{ openshift_build_node_type }}" + set_fact: + launch_config_name: "{{ provision.clusterid }}-{{ openshift_build_node_type }}-{{ ansible_date_time.epoch }}" + + - name: create "{{ openshift_build_node_type }} launch config" + include_role: + name: openshift_aws_launch_config + vars: + r_openshift_aws_launch_config_name: "{{ launch_config_name }}" + r_openshift_aws_launch_config_clusterid: "{{ provision.clusterid }}" + r_openshift_aws_launch_config_region: "{{ provision.region }}" + r_openshift_aws_launch_config: "{{ provision.node_group_config }}" + r_openshift_aws_launch_config_type: "{{ openshift_build_node_type }}" + r_openshift_aws_launch_config_custom_image: "{{ '' if 'results' not in amiout else amiout.results[0].ami_id }}" + r_openshift_aws_launch_config_bootstrap_token: "{{ (local_bootstrap['content'] |b64decode) if local_bootstrap is defined else '' }}" + + - name: "create {{ openshift_build_node_type }} node groups" + include_role: + name: openshift_aws_node_group + vars: + r_openshift_aws_node_group_name: "{{ provision.clusterid }} openshift {{ openshift_build_node_type }}" + r_openshift_aws_node_group_lc_name: "{{ launch_config_name 
}}" + r_openshift_aws_node_group_clusterid: "{{ provision.clusterid }}" + r_openshift_aws_node_group_region: "{{ provision.region }}" + r_openshift_aws_node_group_config: "{{ provision.node_group_config }}" + r_openshift_aws_node_group_type: "{{ openshift_build_node_type }}" + r_openshift_aws_node_group_subnet_name: "{{ provision.vpc.subnets[provision.region][0].az }}" diff --git a/playbooks/aws/openshift-cluster/cluster_hosts.yml b/playbooks/aws/openshift-cluster/cluster_hosts.yml deleted file mode 100644 index fbaf81dec..000000000 --- a/playbooks/aws/openshift-cluster/cluster_hosts.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- -g_all_hosts: "{{ groups['tag_clusterid_' ~ cluster_id] | default([]) - | intersect(groups['tag_environment_' ~ cluster_env] | default([])) }}" - -g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_etcd'] | default([])) }}" - -g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_lb'] | default([])) }}" - -g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_nfs'] | default([])) }}" - -g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_master'] | default([])) }}" - -g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_master'] | default([])) }}" - -g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_node'] | default([])) }}" - -g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type_new_node'] | default([])) }}" - -g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_infra'] | default([])) }}" - -g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type_compute'] | default([])) }}" diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml deleted file mode 100644 index d60b68885..000000000 --- a/playbooks/aws/openshift-cluster/config.yml +++ /dev/null @@ -1,37 +0,0 @@ ---- -- hosts: localhost - gather_facts: no - tasks: - - include_vars: vars.yml - - include_vars: cluster_hosts.yml - - add_host: - name: "{{ item }}" - groups: l_oo_all_hosts - with_items: "{{ g_all_hosts | default([]) }}" - -- hosts: l_oo_all_hosts - gather_facts: no - tasks: - - include_vars: vars.yml - - include_vars: cluster_hosts.yml - -- include: ../../common/openshift-cluster/config.yml - vars: - g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - g_sudo: "{{ deployment_vars[deployment_type].become }}" - g_nodeonmaster: true - openshift_cluster_id: "{{ cluster_id }}" - openshift_debug_level: "{{ debug_level }}" - openshift_deployment_type: "{{ deployment_type }}" - openshift_public_hostname: "{{ ec2_ip_address }}" - openshift_hosted_registry_selector: 'type=infra' - openshift_hosted_router_selector: 'type=infra' - openshift_node_labels: - region: "{{ deployment_vars[deployment_type].region }}" - type: "{{ hostvars[inventory_hostname]['ec2_tag_sub-host-type'] }}" - openshift_master_cluster_method: 'native' - openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}" - os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}" - openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}" - openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}" - openshift_use_dnsmasq: false diff --git a/playbooks/aws/openshift-cluster/launch.yml b/playbooks/aws/openshift-cluster/launch.yml deleted file mode 100644 index 3edace493..000000000 --- a/playbooks/aws/openshift-cluster/launch.yml +++ /dev/null @@ -1,54 +0,0 @@ ---- -- name: Launch instance(s) - hosts: localhost - connection: 
local - become: no - gather_facts: no - vars_files: - - vars.yml - tasks: - - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml - - include: tasks/launch_instances.yml - vars: - instances: "{{ etcd_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - g_sub_host_type: "default" - - - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml - - include: tasks/launch_instances.yml - vars: - instances: "{{ master_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - g_sub_host_type: "default" - - - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml - vars: - type: "compute" - count: "{{ num_nodes }}" - - include: tasks/launch_instances.yml - vars: - instances: "{{ node_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - g_sub_host_type: "{{ sub_host_type }}" - - - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml - vars: - type: "infra" - count: "{{ num_infra }}" - - include: tasks/launch_instances.yml - vars: - instances: "{{ node_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - g_sub_host_type: "{{ sub_host_type }}" - - - add_host: - name: "{{ master_names.0 }}" - groups: service_master - when: master_names is defined and master_names.0 is defined - -- include: update.yml -- include: list.yml diff --git a/playbooks/aws/openshift-cluster/library/ec2_ami_find.py b/playbooks/aws/openshift-cluster/library/ec2_ami_find.py deleted file mode 100644 index 99d0f44f0..000000000 --- a/playbooks/aws/openshift-cluster/library/ec2_ami_find.py +++ /dev/null @@ -1,303 +0,0 @@ -#!/usr/bin/python -#pylint: skip-file -# flake8: noqa -# -# This file is part of Ansible -# -# Ansible is free software: you can redistribute it and/or modify -# it under the terms of the GNU General Public License as published by -# the Free Software Foundation, either version 3 of the License, or -# (at your option) any later version. -# -# Ansible is distributed in the hope that it will be useful, -# but WITHOUT ANY WARRANTY; without even the implied warranty of -# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the -# GNU General Public License for more details. -# -# You should have received a copy of the GNU General Public License -# along with Ansible. If not, see <http://www.gnu.org/licenses/>. - -DOCUMENTATION = ''' ---- -module: ec2_ami_find -version_added: 2.0 -short_description: Searches for AMIs to obtain the AMI ID and other information -description: - - Returns list of matching AMIs with AMI ID, along with other useful information - - Can search AMIs with different owners - - Can search by matching tag(s), by AMI name and/or other criteria - - Results can be sorted and sliced -author: Tom Bamford -notes: - - This module is not backwards compatible with the previous version of the ec2_search_ami module which worked only for Ubuntu AMIs listed on cloud-images.ubuntu.com. - - See the example below for a suggestion of how to search by distro/release. -options: - region: - description: - - The AWS region to use. - required: true - aliases: [ 'aws_region', 'ec2_region' ] - owner: - description: - - Search AMIs owned by the specified owner - - Can specify an AWS account ID, or one of the special IDs 'self', 'amazon' or 'aws-marketplace' - - If not specified, all EC2 AMIs in the specified region will be searched. - - You can include wildcards in many of the search options. An asterisk (*) matches zero or more characters, and a question mark (?) matches exactly one character. 
You can escape special characters using a backslash (\) before the character. For example, a value of \*amazon\?\\ searches for the literal string *amazon?\. - required: false - default: null - ami_id: - description: - - An AMI ID to match. - default: null - required: false - ami_tags: - description: - - A hash/dictionary of tags to match for the AMI. - default: null - required: false - architecture: - description: - - An architecture type to match (e.g. x86_64). - default: null - required: false - hypervisor: - description: - - A hypervisor type type to match (e.g. xen). - default: null - required: false - is_public: - description: - - Whether or not the image(s) are public. - choices: ['yes', 'no'] - default: null - required: false - name: - description: - - An AMI name to match. - default: null - required: false - platform: - description: - - Platform type to match. - default: null - required: false - sort: - description: - - Optional attribute which with to sort the results. - - If specifying 'tag', the 'tag_name' parameter is required. - choices: ['name', 'description', 'tag'] - default: null - required: false - sort_tag: - description: - - Tag name with which to sort results. - - Required when specifying 'sort=tag'. - default: null - required: false - sort_order: - description: - - Order in which to sort results. - - Only used when the 'sort' parameter is specified. - choices: ['ascending', 'descending'] - default: 'ascending' - required: false - sort_start: - description: - - Which result to start with (when sorting). - - Corresponds to Python slice notation. - default: null - required: false - sort_end: - description: - - Which result to end with (when sorting). - - Corresponds to Python slice notation. - default: null - required: false - state: - description: - - AMI state to match. - default: 'available' - required: false - virtualization_type: - description: - - Virtualization type to match (e.g. hvm). - default: null - required: false - no_result_action: - description: - - What to do when no results are found. - - "'success' reports success and returns an empty array" - - "'fail' causes the module to report failure" - choices: ['success', 'fail'] - default: 'success' - required: false -requirements: - - boto - -''' - -EXAMPLES = ''' -# Note: These examples do not set authentication details, see the AWS Guide for details. 
- -# Search for the AMI tagged "project:website" -- ec2_ami_find: - owner: self - tags: - project: website - no_result_action: fail - register: ami_find - -# Search for the latest Ubuntu 14.04 AMI -- ec2_ami_find: - name: "ubuntu/images/ebs/ubuntu-trusty-14.04-amd64-server-*" - owner: 099720109477 - sort: name - sort_order: descending - sort_end: 1 - register: ami_find - -# Launch an EC2 instance -- ec2: - image: "{{ ami_search.results[0].ami_id }}" - instance_type: m4.medium - key_name: mykey - wait: yes -''' - -try: - import boto.ec2 - HAS_BOTO=True -except ImportError: - HAS_BOTO=False - -import json - -def main(): - argument_spec = ec2_argument_spec() - argument_spec.update(dict( - region = dict(required=True, - aliases = ['aws_region', 'ec2_region']), - owner = dict(required=False, default=None), - ami_id = dict(required=False), - ami_tags = dict(required=False, type='dict', - aliases = ['search_tags', 'image_tags']), - architecture = dict(required=False), - hypervisor = dict(required=False), - is_public = dict(required=False), - name = dict(required=False), - platform = dict(required=False), - sort = dict(required=False, default=None, - choices=['name', 'description', 'tag']), - sort_tag = dict(required=False), - sort_order = dict(required=False, default='ascending', - choices=['ascending', 'descending']), - sort_start = dict(required=False), - sort_end = dict(required=False), - state = dict(required=False, default='available'), - virtualization_type = dict(required=False), - no_result_action = dict(required=False, default='success', - choices = ['success', 'fail']), - ) - ) - - module = AnsibleModule( - argument_spec=argument_spec, - ) - - if not HAS_BOTO: - module.fail_json(msg='boto required for this module, install via pip or your package manager') - - ami_id = module.params.get('ami_id') - ami_tags = module.params.get('ami_tags') - architecture = module.params.get('architecture') - hypervisor = module.params.get('hypervisor') - is_public = module.params.get('is_public') - name = module.params.get('name') - owner = module.params.get('owner') - platform = module.params.get('platform') - sort = module.params.get('sort') - sort_tag = module.params.get('sort_tag') - sort_order = module.params.get('sort_order') - sort_start = module.params.get('sort_start') - sort_end = module.params.get('sort_end') - state = module.params.get('state') - virtualization_type = module.params.get('virtualization_type') - no_result_action = module.params.get('no_result_action') - - filter = {'state': state} - - if ami_id: - filter['image_id'] = ami_id - if ami_tags: - for tag in ami_tags: - filter['tag:'+tag] = ami_tags[tag] - if architecture: - filter['architecture'] = architecture - if hypervisor: - filter['hypervisor'] = hypervisor - if is_public: - filter['is_public'] = is_public - if name: - filter['name'] = name - if platform: - filter['platform'] = platform - if virtualization_type: - filter['virtualization_type'] = virtualization_type - - ec2 = ec2_connect(module) - - images_result = ec2.get_all_images(owners=owner, filters=filter) - - if no_result_action == 'fail' and len(images_result) == 0: - module.fail_json(msg="No AMIs matched the attributes: %s" % json.dumps(filter)) - - results = [] - for image in images_result: - data = { - 'ami_id': image.id, - 'architecture': image.architecture, - 'description': image.description, - 'is_public': image.is_public, - 'name': image.name, - 'owner_id': image.owner_id, - 'platform': image.platform, - 'root_device_name': image.root_device_name, - 
'root_device_type': image.root_device_type, - 'state': image.state, - 'tags': image.tags, - 'virtualization_type': image.virtualization_type, - } - - if image.kernel_id: - data['kernel_id'] = image.kernel_id - if image.ramdisk_id: - data['ramdisk_id'] = image.ramdisk_id - - results.append(data) - - if sort == 'tag': - if not sort_tag: - module.fail_json(msg="'sort_tag' option must be given with 'sort=tag'") - results.sort(key=lambda e: e['tags'][sort_tag], reverse=(sort_order=='descending')) - elif sort: - results.sort(key=lambda e: e[sort], reverse=(sort_order=='descending')) - - try: - if sort and sort_start and sort_end: - results = results[int(sort_start):int(sort_end)] - elif sort and sort_start: - results = results[int(sort_start):] - elif sort and sort_end: - results = results[:int(sort_end)] - except TypeError: - module.fail_json(msg="Please supply numeric values for sort_start and/or sort_end") - - module.exit_json(results=results) - -# import module snippets -from ansible.module_utils.basic import * -from ansible.module_utils.ec2 import * - -if __name__ == '__main__': - main() - diff --git a/playbooks/aws/openshift-cluster/list.yml b/playbooks/aws/openshift-cluster/list.yml deleted file mode 100644 index ed8aac398..000000000 --- a/playbooks/aws/openshift-cluster/list.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -- name: Generate oo_list_hosts group - hosts: localhost - gather_facts: no - connection: local - become: no - vars_files: - - vars.yml - tasks: - - set_fact: scratch_group=tag_clusterid_{{ cluster_id }} - when: cluster_id != '' - - set_fact: scratch_group=all - when: cluster_id == '' - - add_host: - name: "{{ item }}" - groups: oo_list_hosts - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - oo_public_ipv4: "{{ hostvars[item].ec2_ip_address }}" - oo_private_ipv4: "{{ hostvars[item].ec2_private_ip_address }}" - with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}" - - debug: - msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}" diff --git a/playbooks/aws/openshift-cluster/provision.yml b/playbooks/aws/openshift-cluster/provision.yml new file mode 100644 index 000000000..dfbf61cc7 --- /dev/null +++ b/playbooks/aws/openshift-cluster/provision.yml @@ -0,0 +1,157 @@ +--- +- name: Setup the vpc and the master node group + hosts: localhost + tasks: + - name: get provisioning vars + include_vars: vars.yml + + - name: create default vpc + include_role: + name: openshift_aws_vpc + vars: + r_openshift_aws_vpc_clusterid: "{{ provision.clusterid }}" + r_openshift_aws_vpc_cidr: "{{ provision.vpc.cidr }}" + r_openshift_aws_vpc_subnets: "{{ provision.vpc.subnets }}" + r_openshift_aws_vpc_region: "{{ provision.region }}" + r_openshift_aws_vpc_tags: "{{ provision.vpc.tags }}" + r_openshift_aws_vpc_name: "{{ provision.vpc.name | default(provision.clusterid) }}" + + - name: create aws ssh keypair + include_role: + name: openshift_aws_ssh_keys + vars: + r_openshift_aws_ssh_keys_users: "{{ provision.instance_users }}" + r_openshift_aws_ssh_keys_region: "{{ provision.region }}" + + - when: provision.openshift_registry_s3 | default(false) + name: create s3 bucket for registry + include_role: + name: openshift_aws_s3 + vars: + r_openshift_aws_s3_clusterid: "{{ provision.clusterid }}-docker-registry" + r_openshift_aws_s3_region: "{{ provision.region }}" + r_openshift_aws_s3_mode: create + + - name: include scale group creation for master 
+ include: build_node_group.yml + vars: + openshift_build_node_type: master + + - name: fetch new master instances + ec2_remote_facts: + region: "{{ provision.region }}" + filters: + "tag:clusterid": "{{ provision.clusterid }}" + "tag:host-type": master + instance-state-name: running + register: instancesout + retries: 20 + delay: 3 + until: instancesout.instances|length > 0 + + - name: bring iam_cert23 into scope + include_role: + name: lib_utils + + - name: upload certificates to AWS IAM + iam_cert23: + state: present + name: "{{ provision.clusterid }}-master-external" + cert: "{{ provision.iam_cert_ca.cert_path }}" + key: "{{ provision.iam_cert_ca.key_path }}" + cert_chain: "{{ provision.iam_cert_ca.chain_path | default(omit) }}" + register: elb_cert_chain + failed_when: + - "'failed' in elb_cert_chain" + - elb_cert_chain.failed + - "'msg' in elb_cert_chain" + - "'already exists' not in elb_cert_chain.msg" + when: provision.iam_cert_ca is defined + + - debug: var=elb_cert_chain + + - name: create our master external and internal load balancers + include_role: + name: openshift_aws_elb + vars: + r_openshift_aws_elb_clusterid: "{{ provision.clusterid }}" + r_openshift_aws_elb_region: "{{ provision.region }}" + r_openshift_aws_elb_instance_filter: + "tag:clusterid": "{{ provision.clusterid }}" + "tag:host-type": master + instance-state-name: running + r_openshift_aws_elb_type: master + r_openshift_aws_elb_direction: "{{ elb_item }}" + r_openshift_aws_elb_idle_timout: 400 + r_openshift_aws_elb_scheme: internet-facing + r_openshift_aws_elb_security_groups: + - "{{ provision.clusterid }}" + - "{{ provision.clusterid }}_master" + r_openshift_aws_elb_subnet_name: "{{ provision.vpc.subnets[provision.region][0].az }}" + r_openshift_aws_elb_name: "{{ provision.clusterid }}-master-{{ elb_item }}" + r_openshift_aws_elb_cert_arn: "{{ elb_cert_chain.arn }}" + with_items: + - internal + - external + loop_control: + loop_var: elb_item + + - name: add new master to masters group + add_host: + groups: "masters,etcd,nodes" + name: "{{ item.public_ip_address }}" + hostname: "{{ provision.clusterid }}-master-{{ item.id[:-5] }}" + with_items: "{{ instancesout.instances }}" + + - name: set facts for group normalization + set_fact: + cluster_id: "{{ provision.clusterid }}" + cluster_env: "{{ provision.node_group_config.tags.environment | default('dev') }}" + + - name: wait for ssh to become available + wait_for: + port: 22 + host: "{{ item.public_ip_address }}" + timeout: 300 + search_regex: OpenSSH + with_items: "{{ instancesout.instances }}" + + +- name: set the master facts for hostname to elb + hosts: masters + gather_facts: no + remote_user: root + tasks: + - name: include vars + include_vars: vars.yml + + - name: fetch elbs + ec2_elb_facts: + region: "{{ provision.region }}" + names: + - "{{ item }}" + with_items: + - "{{ provision.clusterid }}-master-external" + - "{{ provision.clusterid }}-master-internal" + delegate_to: localhost + register: elbs + + - debug: var=elbs + + - name: set fact + set_fact: + openshift_master_cluster_hostname: "{{ elbs.results[1].elbs[0].dns_name }}" + osm_custom_cors_origins: + - "{{ elbs.results[1].elbs[0].dns_name }}" + - "console.{{ provision.clusterid }}.openshift.com" + - "api.{{ provision.clusterid }}.openshift.com" + with_items: "{{ groups['masters'] }}" + +- name: normalize groups + include: ../../byo/openshift-cluster/initialize_groups.yml + +- name: run the std_include + include: ../../common/openshift-cluster/std_include.yml + +- name: run the config + include: 
../../common/openshift-cluster/config.yml diff --git a/playbooks/aws/openshift-cluster/provision_nodes.yml b/playbooks/aws/openshift-cluster/provision_nodes.yml new file mode 100644 index 000000000..5428fb307 --- /dev/null +++ b/playbooks/aws/openshift-cluster/provision_nodes.yml @@ -0,0 +1,47 @@ +--- +# Get bootstrap config token +# bootstrap should be created on first master +# need to fetch it and shove it into cloud data +- name: create the node scale groups + hosts: localhost + connection: local + gather_facts: yes + tasks: + - name: get provisioning vars + include_vars: vars.yml + + - name: fetch master instances + ec2_remote_facts: + region: "{{ provision.region }}" + filters: + "tag:clusterid": "{{ provision.clusterid }}" + "tag:host-type": master + instance-state-name: running + register: instancesout + retries: 20 + delay: 3 + until: instancesout.instances|length > 0 + + - name: slurp down the bootstrap.kubeconfig + slurp: + src: /etc/origin/master/bootstrap.kubeconfig + delegate_to: "{{ instancesout.instances[0].public_ip_address }}" + remote_user: root + register: bootstrap + + - name: set_fact on localhost for kubeconfig + set_fact: + local_bootstrap: "{{ bootstrap }}" + launch_config_name: + infra: "infra-{{ ansible_date_time.epoch }}" + compute: "compute-{{ ansible_date_time.epoch }}" + + - name: include build node group + include: build_node_group.yml + vars: + openshift_build_node_type: infra + + - name: include build node group + include: build_node_group.yml + vars: + openshift_build_node_type: compute diff --git a/playbooks/aws/openshift-cluster/scaleup.yml b/playbooks/aws/openshift-cluster/scaleup.yml deleted file mode 100644 index 6fa9142a0..000000000 --- a/playbooks/aws/openshift-cluster/scaleup.yml +++ /dev/null @@ -1,32 +0,0 @@ ---- - -- hosts: localhost - gather_facts: no - connection: local - become: no - vars_files: - - vars.yml - tasks: - - name: Evaluate oo_hosts_to_update - add_host: - name: "{{ item }}" - groups: oo_hosts_to_update - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: "{{ groups.nodes_to_add }}" - -- include: ../../common/openshift-cluster/update_repos_and_packages.yml - -- include: ../../common/openshift-cluster/scaleup.yml - vars_files: - - ../../aws/openshift-cluster/vars.yml - - ../../aws/openshift-cluster/cluster_hosts.yml - vars: - g_new_node_hosts: "{{ groups.nodes_to_add }}" - g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - g_sudo: "{{ deployment_vars[deployment_type].become }}" - g_nodeonmaster: true - openshift_cluster_id: "{{ cluster_id }}" - openshift_debug_level: "{{ debug_level }}" - openshift_deployment_type: "{{ deployment_type }}" - openshift_public_hostname: "{{ ec2_ip_address }}" diff --git a/playbooks/aws/openshift-cluster/service.yml b/playbooks/aws/openshift-cluster/service.yml deleted file mode 100644 index f7f4812bb..000000000 --- a/playbooks/aws/openshift-cluster/service.yml +++ /dev/null @@ -1,31 +0,0 @@ ---- -- name: Call same systemctl command for openshift on all instance(s) - hosts: localhost - connection: local - become: no - gather_facts: no - vars_files: - - vars.yml - - cluster_hosts.yml - tasks: - - fail: msg="cluster_id is required to be injected in this playbook" - when: cluster_id is not defined - - - name: Evaluate g_service_masters - add_host: - name: "{{ item }}" - groups: g_service_masters - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ 
deployment_vars[deployment_type].become }}" - with_items: "{{ master_hosts | default([]) }}" - - - name: Evaluate g_service_nodes - add_host: - name: "{{ item }}" - groups: g_service_nodes - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: "{{ node_hosts | default([]) }}" - -- include: ../../common/openshift-node/service.yml -- include: ../../common/openshift-master/service.yml diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml deleted file mode 100644 index 608512b79..000000000 --- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml +++ /dev/null @@ -1,188 +0,0 @@ ---- -- set_fact: - created_by: "{{ lookup('env', 'LOGNAME')|default(cluster, true) }}" - docker_vol_ephemeral: "{{ lookup('env', 'os_docker_vol_ephemeral') | default(false, true) }}" - cluster: "{{ cluster_id }}" - env: "{{ cluster_env }}" - host_type: "{{ type }}" - sub_host_type: "{{ g_sub_host_type }}" - -- set_fact: - ec2_instance_type: "{{ lookup('env', 'ec2_master_instance_type') | default(deployment_vars[deployment_type].type, true) }}" - ec2_security_groups: "{{ lookup('env', 'ec2_master_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}" - when: host_type == "master" and sub_host_type == "default" - -- set_fact: - ec2_instance_type: "{{ lookup('env', 'ec2_etcd_instance_type') | default(deployment_vars[deployment_type].type, true) }}" - ec2_security_groups: "{{ lookup('env', 'ec2_etcd_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}" - when: host_type == "etcd" and sub_host_type == "default" - -- set_fact: - ec2_instance_type: "{{ lookup('env', 'ec2_infra_instance_type') | default(deployment_vars[deployment_type].type, true) }}" - ec2_security_groups: "{{ lookup('env', 'ec2_infra_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}" - when: host_type == "node" and sub_host_type == "infra" - -- set_fact: - ec2_instance_type: "{{ lookup('env', 'ec2_node_instance_type') | default(deployment_vars[deployment_type].type, true) }}" - ec2_security_groups: "{{ lookup('env', 'ec2_node_security_groups') | default(deployment_vars[deployment_type].security_groups, true) }}" - when: host_type == "node" and sub_host_type == "compute" - -- set_fact: - ec2_instance_type: "{{ deployment_vars[deployment_type].type }}" - when: ec2_instance_type is not defined -- set_fact: - ec2_security_groups: "{{ deployment_vars[deployment_type].security_groups }}" - when: ec2_security_groups is not defined - -- name: Find amis for deployment_type - ec2_ami_find: - region: "{{ deployment_vars[deployment_type].region }}" - ami_id: "{{ deployment_vars[deployment_type].image }}" - name: "{{ deployment_vars[deployment_type].image_name }}" - register: ami_result - -- fail: msg="Could not find requested ami" - when: not ami_result.results - -- set_fact: - latest_ami: "{{ ami_result.results | oo_ami_selector(deployment_vars[deployment_type].image_name) }}" - volume_defs: - etcd: - root: - volume_size: "{{ lookup('env', 'os_etcd_root_vol_size') | default(25, true) }}" - device_type: "{{ lookup('env', 'os_etcd_root_vol_type') | default('gp2', true) }}" - iops: "{{ lookup('env', 'os_etcd_root_vol_iops') | default(500, true) }}" - master: - root: - volume_size: "{{ lookup('env', 'os_master_root_vol_size') | default(25, true) }}" - device_type: "{{ lookup('env', 
'os_master_root_vol_type') | default('gp2', true) }}" - iops: "{{ lookup('env', 'os_master_root_vol_iops') | default(500, true) }}" - docker: - volume_size: "{{ lookup('env', 'os_docker_vol_size') | default(10, true) }}" - device_type: "{{ lookup('env', 'os_docker_vol_type') | default('gp2', true) }}" - iops: "{{ lookup('env', 'os_docker_vol_iops') | default(500, true) }}" - node: - root: - volume_size: "{{ lookup('env', 'os_node_root_vol_size') | default(85, true) }}" - device_type: "{{ lookup('env', 'os_node_root_vol_type') | default('gp2', true) }}" - iops: "{{ lookup('env', 'os_node_root_vol_iops') | default(500, true) }}" - docker: - volume_size: "{{ lookup('env', 'os_docker_vol_size') | default(32, true) }}" - device_type: "{{ lookup('env', 'os_docker_vol_type') | default('gp2', true) }}" - iops: "{{ lookup('env', 'os_docker_vol_iops') | default(500, true) }}" - -- set_fact: - volumes: "{{ volume_defs | oo_ec2_volume_definition(host_type, docker_vol_ephemeral | bool) }}" - -- name: Launch instance(s) - ec2: - state: present - region: "{{ deployment_vars[deployment_type].region }}" - keypair: "{{ deployment_vars[deployment_type].keypair }}" - group: "{{ deployment_vars[deployment_type].security_groups }}" - instance_type: "{{ ec2_instance_type }}" - image: "{{ deployment_vars[deployment_type].image }}" - count: "{{ instances | length }}" - vpc_subnet_id: "{{ deployment_vars[deployment_type].vpc_subnet }}" - assign_public_ip: "{{ deployment_vars[deployment_type].assign_public_ip }}" - user_data: "{{ lookup('template', '../templates/user_data.j2') }}" - wait: yes - instance_tags: - created-by: "{{ created_by }}" - clusterid: "{{ cluster }}" - environment: "{{ cluster_env }}" - host-type: "{{ host_type }}" - sub-host-type: "{{ sub_host_type }}" - volumes: "{{ volumes }}" - register: ec2 - -- name: Add Name tag to instances - ec2_tag: resource={{ item.1.id }} region={{ deployment_vars[deployment_type].region }} state=present - with_together: - - "{{ instances }}" - - "{{ ec2.instances }}" - args: - tags: - Name: "{{ item.0 }}" - -- set_fact: - instance_groups: > - tag_created-by_{{ created_by }}, tag_clusterid_{{ cluster }}, - tag_environment_{{ cluster_env }}, tag_host-type_{{ host_type }}, - tag_sub-host-type_{{ sub_host_type }} - -- set_fact: - node_label: - region: "{{ deployment_vars[deployment_type].region }}" - type: "{{sub_host_type}}" - when: host_type == "node" - -- set_fact: - node_label: - region: "{{ deployment_vars[deployment_type].region }}" - type: "{{host_type}}" - when: host_type != "node" - -- set_fact: - logrotate: - - name: syslog - path: | - /var/log/cron - /var/log/maillog - /var/log/messages - /var/log/secure - /var/log/spooler" - options: - - daily - - rotate 7 - - compress - - sharedscripts - - missingok - scripts: - postrotate: "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true" - -- name: Add new instances groups and variables - add_host: - hostname: "{{ item.0 }}" - ansible_ssh_host: "{{ item.1.dns_name }}" - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - groups: "{{ instance_groups }}" - ec2_private_ip_address: "{{ item.1.private_ip }}" - ec2_ip_address: "{{ item.1.public_ip }}" - ec2_tag_sub-host-type: "{{ sub_host_type }}" - openshift_node_labels: "{{ node_label }}" - logrotate_scripts: "{{ logrotate }}" - with_together: - - "{{ instances }}" - - "{{ ec2.instances }}" - -- name: Add new instances to nodes_to_add group if needed - add_host: - 
hostname: "{{ item.0 }}" - ansible_ssh_host: "{{ item.1.dns_name }}" - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - groups: nodes_to_add - ec2_private_ip_address: "{{ item.1.private_ip }}" - ec2_ip_address: "{{ item.1.public_ip }}" - openshift_node_labels: "{{ node_label }}" - logrotate_scripts: "{{ logrotate }}" - with_together: - - "{{ instances }}" - - "{{ ec2.instances }}" - when: oo_extend_env is defined and oo_extend_env | bool - -- name: Wait for ssh - wait_for: "port=22 host={{ item.dns_name }}" - with_items: "{{ ec2.instances }}" - -- name: Wait for user setup - command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.0].ansible_ssh_user }}@{{ item.1.dns_name }} echo {{ hostvars[item.0].ansible_ssh_user }} user is setup" - register: result - until: result.rc == 0 - retries: 20 - delay: 10 - with_together: - - "{{ instances }}" - - "{{ ec2.instances }}" diff --git a/playbooks/aws/openshift-cluster/templates/user_data.j2 b/playbooks/aws/openshift-cluster/templates/user_data.j2 deleted file mode 100644 index b1087f9c4..000000000 --- a/playbooks/aws/openshift-cluster/templates/user_data.j2 +++ /dev/null @@ -1,22 +0,0 @@ -#cloud-config -{% if type in ['node', 'master'] and 'docker' in volume_defs[type] %} -mounts: -- [ xvdb ] -- [ ephemeral0 ] -{% endif %} - -write_files: -{% if type in ['node', 'master'] and 'docker' in volume_defs[type] %} -- content: | - DEVS=/dev/xvdb - VG=docker_vg - path: /etc/sysconfig/docker-storage-setup - owner: root:root - permissions: '0644' -{% endif %} -{% if deployment_vars[deployment_type].become | bool %} -- path: /etc/sudoers.d/99-{{ deployment_vars[deployment_type].ssh_user }}-cloud-init-requiretty - permissions: 440 - content: | - Defaults:{{ deployment_vars[deployment_type].ssh_user }} !requiretty -{% endif %} diff --git a/playbooks/aws/openshift-cluster/terminate.yml b/playbooks/aws/openshift-cluster/terminate.yml deleted file mode 100644 index 1f15aa4bf..000000000 --- a/playbooks/aws/openshift-cluster/terminate.yml +++ /dev/null @@ -1,77 +0,0 @@ ---- -- name: Terminate instance(s) - hosts: localhost - connection: local - become: no - gather_facts: no - vars_files: - - vars.yml - tasks: - - add_host: - name: "{{ item }}" - groups: oo_hosts_to_terminate - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: "{{ (groups['tag_clusterid_' ~ cluster_id] | default([])) | difference(['localhost']) }}" - -- name: Unsubscribe VMs - hosts: oo_hosts_to_terminate - roles: - - role: rhel_unsubscribe - when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and - ansible_distribution == "RedHat" and - lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) | - default('no', True) | lower in ['no', 'false'] - -- name: Terminate instances - hosts: localhost - connection: local - become: no - gather_facts: no - tasks: - - name: Remove tags from instances - ec2_tag: - resource: "{{ hostvars[item]['ec2_id'] }}" - region: "{{ hostvars[item]['ec2_region'] }}" - state: absent - tags: - environment: "{{ hostvars[item]['ec2_tag_environment'] }}" - clusterid: "{{ hostvars[item]['ec2_tag_clusterid'] }}" - host-type: "{{ hostvars[item]['ec2_tag_host-type'] }}" - sub_host_type: "{{ hostvars[item]['ec2_tag_sub-host-type'] }}" - with_items: "{{ 
groups.oo_hosts_to_terminate }}" - when: "'oo_hosts_to_terminate' in groups" - - - name: Terminate instances - ec2: - state: absent - instance_ids: ["{{ hostvars[item].ec2_id }}"] - region: "{{ hostvars[item].ec2_region }}" - ignore_errors: yes - register: ec2_term - with_items: "{{ groups.oo_hosts_to_terminate }}" - when: "'oo_hosts_to_terminate' in groups" - - # Fail if any of the instances failed to terminate with an error other - # than 403 Forbidden - - fail: - msg: "Terminating instance {{ item.ec2_id }} failed with message {{ item.msg }}" - when: "'oo_hosts_to_terminate' in groups and item.has_key('failed') and item.failed" - with_items: "{{ ec2_term.results }}" - - - name: Stop instance if termination failed - ec2: - state: stopped - instance_ids: ["{{ item.item.ec2_id }}"] - region: "{{ item.item.ec2_region }}" - register: ec2_stop - when: "'oo_hosts_to_terminate' in groups and item.has_key('failed') and item.failed" - with_items: "{{ ec2_term.results }}" - - - name: Rename stopped instances - ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present - args: - tags: - Name: "{{ item.item.item.ec2_tag_Name }}-terminate" - with_items: "{{ ec2_stop.results }}" - when: ec2_stop | changed diff --git a/playbooks/aws/openshift-cluster/update.yml b/playbooks/aws/openshift-cluster/update.yml deleted file mode 100644 index ed05d61ed..000000000 --- a/playbooks/aws/openshift-cluster/update.yml +++ /dev/null @@ -1,34 +0,0 @@ ---- -- hosts: localhost - gather_facts: no - tasks: - - include_vars: vars.yml - - include_vars: cluster_hosts.yml - - add_host: - name: "{{ item }}" - groups: l_oo_all_hosts - with_items: "{{ g_all_hosts }}" - -- hosts: l_oo_all_hosts - gather_facts: no - tasks: - - include_vars: vars.yml - - include_vars: cluster_hosts.yml - -- name: Update - Populate oo_hosts_to_update group - hosts: localhost - connection: local - become: no - gather_facts: no - tasks: - - name: Update - Evaluate oo_hosts_to_update - add_host: - name: "{{ item }}" - groups: oo_hosts_to_update - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: "{{ g_all_hosts | default([]) }}" - -- include: ../../common/openshift-cluster/update_repos_and_packages.yml - -- include: config.yml diff --git a/playbooks/aws/openshift-cluster/vars.yml b/playbooks/aws/openshift-cluster/vars.yml index d774187f0..47da03cb7 100644 --- a/playbooks/aws/openshift-cluster/vars.yml +++ b/playbooks/aws/openshift-cluster/vars.yml @@ -1,33 +1,113 @@ --- -debug_level: 2 - -deployment_rhel7_ent_base: - # rhel-7.1, requires cloud access subscription - image: "{{ lookup('oo_option', 'ec2_image') | default('ami-10251c7a', True) }}" - image_name: "{{ lookup('oo_option', 'ec2_image_name') | default(None, True) }}" - region: "{{ lookup('oo_option', 'ec2_region') | default('us-east-1', True) }}" - ssh_user: ec2-user - become: yes - keypair: "{{ lookup('oo_option', 'ec2_keypair') | default('libra', True) }}" - type: "{{ lookup('oo_option', 'ec2_instance_type') | default('m4.large', True) }}" - security_groups: "{{ lookup('oo_option', 'ec2_security_groups') | default([ 'public' ], True) }}" - vpc_subnet: "{{ lookup('oo_option', 'ec2_vpc_subnet') | default(omit, True) }}" - assign_public_ip: "{{ lookup('oo_option', 'ec2_assign_public_ip') | default(omit, True) }}" - -deployment_vars: - origin: - # centos-7, requires marketplace - image: "{{ lookup('oo_option', 'ec2_image') | default('ami-6d1c2007', True) }}" - 
image_name: "{{ lookup('oo_option', 'ec2_image_name') | default(None, True) }}" - region: "{{ lookup('oo_option', 'ec2_region') | default('us-east-1', True) }}" - ssh_user: centos - become: yes - keypair: "{{ lookup('oo_option', 'ec2_keypair') | default('libra', True) }}" - type: "{{ lookup('oo_option', 'ec2_instance_type') | default('m4.large', True) }}" - security_groups: "{{ lookup('oo_option', 'ec2_security_groups') | default([ 'public' ], True) }}" - vpc_subnet: "{{ lookup('oo_option', 'ec2_vpc_subnet') | default(omit, True) }}" - assign_public_ip: "{{ lookup('oo_option', 'ec2_assign_public_ip') | default(omit, True) }}" - - enterprise: "{{ deployment_rhel7_ent_base }}" - openshift-enterprise: "{{ deployment_rhel7_ent_base }}" - atomic-enterprise: "{{ deployment_rhel7_ent_base }}" + +clusterid: mycluster +region: us-east-1 + +provision: + clusterid: "{{ clusterid }}" + region: "{{ region }}" + + build: # build specific variables here + ami_name: "openshift-gi-" + base_image: ami-bdd5d6ab # base image for AMI to build from + + # when creating an encrypted AMI please specify use_encryption + use_encryption: False + + openshift_ami_tags: + bootstrap: "true" + openshift-created: "true" + clusterid: "{{ clusterid }}" + + # Use s3 backed registry storage + openshift_registry_s3: True + + # if using custom certificates these are required for the ELB + iam_cert_ca: + name: "{{ clusterid }}_openshift" + cert_path: '/path/to/wildcard.<clusterid>.example.com.crt' + key_path: '/path/to/wildcard.<clusterid>.example.com.key' + chain_path: '/path/to/cert.ca.crt' + + instance_users: + - key_name: myuser_key + username: myuser + pub_key: | + ssh-rsa AAAA== myuser@system + + node_group_config: + tags: + clusterid: "{{ clusterid }}" + environment: stg + + ssh_key_name: myuser_key + + # master specific cluster node settings + master: + instance_type: m4.xlarge + ami: ami-cdeec8b6 # if using an encrypted AMI this will be replaced + volumes: + - device_name: /dev/sdb + volume_size: 100 + device_type: gp2 + delete_on_termination: False + health_check: + period: 60 + type: EC2 + min_size: 3 + max_size: 3 + desired_size: 3 + tags: + host-type: master + sub-host-type: default + wait_for_instances: True + + # compute specific cluster node settings + compute: + instance_type: m4.xlarge + ami: ami-cdeec8b6 + volumes: + - device_name: /dev/sdb + volume_size: 100 + device_type: gp2 + delete_on_termination: True + health_check: + period: 60 + type: EC2 + min_size: 3 + max_size: 100 + desired_size: 3 + tags: + host-type: node + sub-host-type: compute + + # infra specific cluster node settings + infra: + instance_type: m4.xlarge + ami: ami-cdeec8b6 + volumes: + - device_name: /dev/sdb + volume_size: 100 + device_type: gp2 + delete_on_termination: True + health_check: + period: 60 + type: EC2 + min_size: 2 + max_size: 20 + desired_size: 2 + tags: + host-type: node + sub-host-type: infra + + # vpc settings + vpc: + cidr: 172.31.0.0/16 + subnets: + us-east-1: # These are us-east-1 region defaults. 
Ensure this matches your region + - cidr: 172.31.48.0/20 + az: "us-east-1c" + - cidr: 172.31.32.0/20 + az: "us-east-1e" + - cidr: 172.31.16.0/20 + az: "us-east-1a" diff --git a/playbooks/byo/openshift-cfme/config.yml b/playbooks/byo/openshift-cfme/config.yml new file mode 100644 index 000000000..0e8e7a94d --- /dev/null +++ b/playbooks/byo/openshift-cfme/config.yml @@ -0,0 +1,8 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + tags: + - always + +- include: ../../common/openshift-cluster/evaluate_groups.yml + +- include: ../../common/openshift-cfme/config.yml diff --git a/playbooks/byo/openshift-cfme/uninstall.yml b/playbooks/byo/openshift-cfme/uninstall.yml new file mode 100644 index 000000000..c8ed16859 --- /dev/null +++ b/playbooks/byo/openshift-cfme/uninstall.yml @@ -0,0 +1,6 @@ +--- +# - include: ../openshift-cluster/initialize_groups.yml +# tags: +# - always + +- include: ../../common/openshift-cfme/uninstall.yml diff --git a/playbooks/byo/openshift-checks/README.md b/playbooks/byo/openshift-checks/README.md new file mode 100644 index 000000000..b26e7d7ed --- /dev/null +++ b/playbooks/byo/openshift-checks/README.md @@ -0,0 +1,104 @@ +# OpenShift health checks + +This directory contains Ansible playbooks for detecting potential problems prior +to an install, as well as health checks to run on existing OpenShift clusters. + +Ansible's default operation mode is to fail fast, on the first error. However, +when performing checks, it is useful to gather as much information about +problems as possible in a single run. + +Thus, the playbooks run a battery of checks against the inventory hosts and +gather intermediate errors, giving a more complete diagnostic of the state of +each host. If any check fails, the playbook run will be marked as failed. + +To facilitate understanding the problems that were encountered, a custom +callback plugin summarizes execution errors at the end of a playbook run. + +## Available playbooks + +1. Pre-install playbook ([pre-install.yml](pre-install.yml)) - verifies system + requirements and looks for common problems that can prevent a successful + installation of a production cluster. + +2. Diagnostic playbook ([health.yml](health.yml)) - checks an existing cluster + for known signs of problems. + +3. Certificate expiry playbooks ([certificate_expiry](certificate_expiry)) - + check that certificates in use are valid and not expiring soon. + +4. Adhoc playbook ([adhoc.yml](adhoc.yml)) - use it to run adhoc checks or to + list existing checks. + See the [next section](#the-adhoc-playbook) for a usage example. + +## Running + +With a [recent installation of Ansible](../../../README.md#setup), run the playbook +against your inventory file. Here is the step-by-step: + +1. If you haven't done it yet, clone this repository: + + ```console + $ git clone https://github.com/openshift/openshift-ansible + $ cd openshift-ansible + ``` + +2. Install the [dependencies](../../../README.md#setup) + +3. Run the appropriate playbook: + + ```console + $ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/pre-install.yml + ``` + + or + + ```console + $ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/health.yml + ``` + + or + + ```console + $ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/certificate_expiry/default.yaml -v + ``` + +### The adhoc playbook + +The adhoc playbook gives flexibility to run any check or a custom group of +checks. 
What will be run is determined by the `openshift_checks` variable, +which, among other ways supported by Ansible, can be set on the command line +using the `-e` flag. + +For example, to run the `docker_storage` check: + +```console +$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/adhoc.yml -e openshift_checks=docker_storage +``` + +To run more checks, use a comma-separated list of check names: + +```console +$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/adhoc.yml -e openshift_checks=docker_storage,disk_availability +``` + +To run an entire class of checks, use the name of a check group tag, prefixed by `@`. This will run all checks tagged `preflight`: + +```console +$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/adhoc.yml -e openshift_checks=@preflight +``` + +It is valid to specify multiple check tags and individual check names together +in a comma-separated list. + +To list all of the available checks and tags, run the adhoc playbook without +setting the `openshift_checks` variable: + +```console +$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/adhoc.yml +``` + +## Running in a container + +This repository is built into a Docker image including Ansible so that it can +be run anywhere Docker is available, without the need to manually install dependencies. +Instructions for doing so may be found [in the README](../../../README_CONTAINER_IMAGE.md). diff --git a/playbooks/byo/openshift-checks/adhoc.yml b/playbooks/byo/openshift-checks/adhoc.yml new file mode 100644 index 000000000..226bed732 --- /dev/null +++ b/playbooks/byo/openshift-checks/adhoc.yml @@ -0,0 +1,27 @@ +--- +# NOTE: ideally this would be just part of a single play in +# common/openshift-checks/adhoc.yml that lists the existing checks when +# openshift_checks is not set or run the requested checks. However, to actually +# run the checks we need to have the included dependencies to run first and that +# takes time. To speed up listing checks, we use this separate play that runs +# before the include of dependencies to save time and improve the UX. +- name: OpenShift health checks + # NOTE: though the openshift_checks variable could be potentially defined on + # individual hosts while not defined for localhost, we do not support that + # usage. Running this play only in localhost speeds up execution. 
+ hosts: localhost + connection: local + roles: + - openshift_health_checker + vars: + - r_openshift_health_checker_playbook_context: adhoc + pre_tasks: + - name: List known health checks + action: openshift_health_check + when: openshift_checks is undefined or not openshift_checks + +- include: ../openshift-cluster/initialize_groups.yml + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-checks/adhoc.yml diff --git a/playbooks/certificate_expiry/default.yaml b/playbooks/byo/openshift-checks/certificate_expiry/default.yaml index 630135cae..630135cae 100644 --- a/playbooks/certificate_expiry/default.yaml +++ b/playbooks/byo/openshift-checks/certificate_expiry/default.yaml diff --git a/playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml b/playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml new file mode 100644 index 000000000..378d1f154 --- /dev/null +++ b/playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml @@ -0,0 +1,40 @@ +# This example generates HTML and JSON reports and +# +# Copies of the generated HTML and JSON reports are uploaded to the masters, +# which is particularly useful when this playbook is run from a container. +# +# All certificates (healthy or not) are included in the results +# +# Optional environment variables to alter the behaviour of the playbook: +# CERT_EXPIRY_WARN_DAYS: Length of the warning window in days (45) +# COPY_TO_PATH: path to copy reports to in the masters (/etc/origin/certificate_expiration_report) +--- +- name: Generate certificate expiration reports + hosts: nodes:masters:etcd + gather_facts: no + vars: + openshift_certificate_expiry_save_json_results: yes + openshift_certificate_expiry_generate_html_report: yes + openshift_certificate_expiry_show_all: yes + openshift_certificate_expiry_warning_days: "{{ lookup('env', 'CERT_EXPIRY_WARN_DAYS') | default('45', true) }}" + roles: + - role: openshift_certificate_expiry + +- name: Upload reports to master + hosts: masters + gather_facts: no + vars: + destination_path: "{{ lookup('env', 'COPY_TO_PATH') | default('/etc/origin/certificate_expiration_report', true) }}" + timestamp: "{{ lookup('pipe', 'date +%Y%m%d') }}" + tasks: + - name: Ensure that the target directory exists + file: + path: "{{ destination_path }}" + state: directory + - name: Copy the reports + copy: + dest: "{{ destination_path }}/{{ timestamp }}-{{ item }}" + src: "/tmp/{{ item }}" + with_items: + - "cert-expiry-report.html" + - "cert-expiry-report.json" diff --git a/playbooks/certificate_expiry/easy-mode.yaml b/playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml index ae41c7c14..ae41c7c14 100644 --- a/playbooks/certificate_expiry/easy-mode.yaml +++ b/playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml diff --git a/playbooks/certificate_expiry/html_and_json_default_paths.yaml b/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml index d80cb6ff4..d80cb6ff4 100644 --- a/playbooks/certificate_expiry/html_and_json_default_paths.yaml +++ b/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml diff --git a/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml b/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml new file mode 100644 index 000000000..2189455b7 --- /dev/null +++ b/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml @@ -0,0 +1,16 @@ +--- +# Generate timestamped 
HTML and JSON reports in /var/lib/certcheck + +- name: Check cert expirys + hosts: nodes:masters:etcd + become: yes + gather_facts: no + vars: + openshift_certificate_expiry_generate_html_report: yes + openshift_certificate_expiry_save_json_results: yes + openshift_certificate_expiry_show_all: yes + timestamp: "{{ lookup('pipe', 'date +%Y%m%d') }}" + openshift_certificate_expiry_html_report_path: "/var/lib/certcheck/{{ timestamp }}-cert-expiry-report.html" + openshift_certificate_expiry_json_results_path: "/var/lib/certcheck/{{ timestamp }}-cert-expiry-report.json" + roles: + - role: openshift_certificate_expiry diff --git a/playbooks/certificate_expiry/longer-warning-period-json-results.yaml b/playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml index 87a0f3be4..87a0f3be4 100644 --- a/playbooks/certificate_expiry/longer-warning-period-json-results.yaml +++ b/playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml diff --git a/playbooks/certificate_expiry/longer_warning_period.yaml b/playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml index 960457c4b..960457c4b 100644 --- a/playbooks/certificate_expiry/longer_warning_period.yaml +++ b/playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml diff --git a/playbooks/byo/openshift-checks/certificate_expiry/roles b/playbooks/byo/openshift-checks/certificate_expiry/roles new file mode 120000 index 000000000..4bdbcbad3 --- /dev/null +++ b/playbooks/byo/openshift-checks/certificate_expiry/roles @@ -0,0 +1 @@ +../../../../roles
\ No newline at end of file diff --git a/playbooks/byo/openshift-checks/health.yml b/playbooks/byo/openshift-checks/health.yml new file mode 100644 index 000000000..96a71e4dc --- /dev/null +++ b/playbooks/byo/openshift-checks/health.yml @@ -0,0 +1,6 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-checks/health.yml diff --git a/playbooks/byo/openshift-checks/pre-install.yml b/playbooks/byo/openshift-checks/pre-install.yml new file mode 100644 index 000000000..dd93df0bb --- /dev/null +++ b/playbooks/byo/openshift-checks/pre-install.yml @@ -0,0 +1,6 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-checks/pre-install.yml diff --git a/playbooks/byo/openshift-cluster/cluster_hosts.yml b/playbooks/byo/openshift-cluster/cluster_hosts.yml index cb464cf0d..e807ac004 100644 --- a/playbooks/byo/openshift-cluster/cluster_hosts.yml +++ b/playbooks/byo/openshift-cluster/cluster_hosts.yml @@ -1,6 +1,8 @@ --- g_etcd_hosts: "{{ groups.etcd | default([]) }}" +g_new_etcd_hosts: "{{ groups.new_etcd | default([]) }}" + g_lb_hosts: "{{ groups.lb | default([]) }}" g_master_hosts: "{{ groups.masters | default([]) }}" @@ -13,7 +15,12 @@ g_new_node_hosts: "{{ groups.new_nodes | default([]) }}" g_nfs_hosts: "{{ groups.nfs | default([]) }}" +g_glusterfs_hosts: "{{ groups.glusterfs | default([]) }}" + +g_glusterfs_registry_hosts: "{{ groups.glusterfs_registry | default(g_glusterfs_hosts) }}" + g_all_hosts: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts) - | union(g_lb_hosts) | union(g_nfs_hosts) + | union(g_new_etcd_hosts) | union(g_lb_hosts) | union(g_nfs_hosts) | union(g_new_node_hosts)| union(g_new_master_hosts) + | union(g_glusterfs_hosts) | union(g_glusterfs_registry_hosts) | default([]) }}" diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml index 86eff4ca4..acf5469bf 100644 --- a/playbooks/byo/openshift-cluster/config.yml +++ b/playbooks/byo/openshift-cluster/config.yml @@ -1,4 +1,8 @@ --- +- include: initialize_groups.yml + tags: + - always + - include: ../../common/openshift-cluster/std_include.yml tags: - always @@ -7,5 +11,4 @@ vars: openshift_cluster_id: "{{ cluster_id | default('default') }}" openshift_debug_level: "{{ debug_level | default(2) }}" - openshift_deployment_type: "{{ deployment_type }}" openshift_deployment_subtype: "{{ deployment_subtype | default(none) }}" diff --git a/playbooks/byo/openshift-cluster/enable_dnsmasq.yml b/playbooks/byo/openshift-cluster/enable_dnsmasq.yml index 32f9ebfd3..9ce8f0d3c 100644 --- a/playbooks/byo/openshift-cluster/enable_dnsmasq.yml +++ b/playbooks/byo/openshift-cluster/enable_dnsmasq.yml @@ -1,26 +1,4 @@ --- -- name: Create initial host groups for localhost - hosts: localhost - connection: local - become: no - gather_facts: no - tags: - - always - tasks: - - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml - - name: Evaluate group l_oo_all_hosts - add_host: - name: "{{ item }}" - groups: l_oo_all_hosts - with_items: "{{ g_all_hosts | default([]) }}" - changed_when: False - -- name: Create initial host groups for all hosts - hosts: l_oo_all_hosts - gather_facts: no - tags: - - always - tasks: - - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml +- include: initialize_groups.yml - include: ../../common/openshift-cluster/enable_dnsmasq.yml diff --git 
a/playbooks/byo/openshift-cluster/initialize_groups.yml b/playbooks/byo/openshift-cluster/initialize_groups.yml new file mode 100644 index 000000000..2a725510a --- /dev/null +++ b/playbooks/byo/openshift-cluster/initialize_groups.yml @@ -0,0 +1,10 @@ +--- +- name: Create initial host groups for localhost + hosts: localhost + connection: local + become: no + gather_facts: no + tags: + - always + tasks: + - include_vars: cluster_hosts.yml diff --git a/playbooks/byo/openshift-cluster/openshift-logging.yml b/playbooks/byo/openshift-cluster/openshift-logging.yml index eebfcd20d..bbec3a4c2 100644 --- a/playbooks/byo/openshift-cluster/openshift-logging.yml +++ b/playbooks/byo/openshift-cluster/openshift-logging.yml @@ -4,32 +4,15 @@ # Hosted logging on. See inventory/byo/hosts.*.example for the # currently supported method. # -- name: Create initial host groups for localhost - hosts: localhost - connection: local - become: no - gather_facts: no +- include: initialize_groups.yml tags: - always - tasks: - - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml - - name: Evaluate group l_oo_all_hosts - add_host: - name: "{{ item }}" - groups: l_oo_all_hosts - with_items: "{{ g_all_hosts | default([]) }}" - changed_when: False -- name: Create initial host groups for all hosts - hosts: l_oo_all_hosts - gather_facts: no +- include: ../../common/openshift-cluster/std_include.yml tags: - always - tasks: - - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml - include: ../../common/openshift-cluster/openshift_logging.yml vars: openshift_cluster_id: "{{ cluster_id | default('default') }}" openshift_debug_level: "{{ debug_level | default(2) }}" - openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/byo/openshift-cluster/openshift-metrics.yml b/playbooks/byo/openshift-cluster/openshift-metrics.yml new file mode 100644 index 000000000..1135c8c11 --- /dev/null +++ b/playbooks/byo/openshift-cluster/openshift-metrics.yml @@ -0,0 +1,10 @@ +--- +- include: initialize_groups.yml + tags: + - always + +- include: ../../common/openshift-cluster/std_include.yml + tags: + - always + +- include: ../../common/openshift-cluster/openshift_metrics.yml diff --git a/playbooks/byo/openshift-cluster/openshift-provisioners.yml b/playbooks/byo/openshift-cluster/openshift-provisioners.yml new file mode 100644 index 000000000..8e80f158b --- /dev/null +++ b/playbooks/byo/openshift-cluster/openshift-provisioners.yml @@ -0,0 +1,6 @@ +--- +- include: initialize_groups.yml + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-cluster/openshift_provisioners.yml diff --git a/playbooks/byo/openshift-cluster/redeploy-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-certificates.yml index ad24b9ad0..a3894e243 100644 --- a/playbooks/byo/openshift-cluster/redeploy-certificates.yml +++ b/playbooks/byo/openshift-cluster/redeploy-certificates.yml @@ -1,4 +1,8 @@ --- +- include: initialize_groups.yml + tags: + - always + - include: ../../common/openshift-cluster/std_include.yml tags: - always @@ -16,5 +20,7 @@ - include: ../../common/openshift-node/restart.yml - include: ../../common/openshift-cluster/redeploy-certificates/router.yml + when: openshift_hosted_manage_router | default(true) | bool - include: ../../common/openshift-cluster/redeploy-certificates/registry.yml + when: openshift_hosted_manage_registry | default(true) | bool diff --git a/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml b/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml new 
file mode 100644 index 000000000..29f821eda --- /dev/null +++ b/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml @@ -0,0 +1,10 @@ +--- +- include: initialize_groups.yml + tags: + - always + +- include: ../../common/openshift-cluster/std_include.yml + tags: + - always + +- include: ../../common/openshift-cluster/redeploy-certificates/etcd-ca.yml diff --git a/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml index ee49364fa..8516baee8 100644 --- a/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml +++ b/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml @@ -1,4 +1,8 @@ --- +- include: initialize_groups.yml + tags: + - always + - include: ../../common/openshift-cluster/std_include.yml tags: - always diff --git a/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml index 9c8248c4e..566e8b261 100644 --- a/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml +++ b/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml @@ -1,4 +1,8 @@ --- +- include: initialize_groups.yml + tags: + - always + - include: ../../common/openshift-cluster/std_include.yml tags: - always diff --git a/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml index 1695111d0..42777e5e6 100644 --- a/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml +++ b/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml @@ -1,4 +1,8 @@ --- +- include: initialize_groups.yml + tags: + - always + - include: ../../common/openshift-cluster/std_include.yml tags: - always diff --git a/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml b/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml index e44e95467..6e11a111b 100644 --- a/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml +++ b/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml @@ -1,6 +1,10 @@ --- +- include: initialize_groups.yml + tags: + - always + - include: ../../common/openshift-cluster/std_include.yml tags: - always -- include: ../../common/openshift-cluster/redeploy-certificates/ca.yml +- include: ../../common/openshift-cluster/redeploy-certificates/openshift-ca.yml diff --git a/playbooks/byo/openshift-cluster/redeploy-registry-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-registry-certificates.yml index 53ee68db9..30feabab3 100644 --- a/playbooks/byo/openshift-cluster/redeploy-registry-certificates.yml +++ b/playbooks/byo/openshift-cluster/redeploy-registry-certificates.yml @@ -1,4 +1,8 @@ --- +- include: initialize_groups.yml + tags: + - always + - include: ../../common/openshift-cluster/std_include.yml tags: - always diff --git a/playbooks/byo/openshift-cluster/redeploy-router-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-router-certificates.yml index f8c267569..2630fb234 100644 --- a/playbooks/byo/openshift-cluster/redeploy-router-certificates.yml +++ b/playbooks/byo/openshift-cluster/redeploy-router-certificates.yml @@ -1,4 +1,8 @@ --- +- include: initialize_groups.yml + tags: + - always + - include: ../../common/openshift-cluster/std_include.yml tags: - always diff --git a/playbooks/byo/openshift-cluster/service-catalog.yml b/playbooks/byo/openshift-cluster/service-catalog.yml new file mode 100644 index 000000000..6f95b4e2d --- /dev/null +++ b/playbooks/byo/openshift-cluster/service-catalog.yml @@ -0,0 +1,18 @@ +--- +# +# 
This playbook is a preview of upcoming changes for installing +# the service catalog. See inventory/byo/hosts.*.example for the +# currently supported method. +# +- include: initialize_groups.yml + tags: + - always + +- include: ../../common/openshift-cluster/std_include.yml + tags: + - always + +- include: ../../common/openshift-cluster/service_catalog.yml + vars: + openshift_cluster_id: "{{ cluster_id | default('default') }}" + openshift_debug_level: "{{ debug_level | default(2) }}" diff --git a/playbooks/byo/openshift-cluster/upgrades/README.md b/playbooks/byo/openshift-cluster/upgrades/README.md index 0425ba518..0f64f40f3 100644 --- a/playbooks/byo/openshift-cluster/upgrades/README.md +++ b/playbooks/byo/openshift-cluster/upgrades/README.md @@ -4,5 +4,6 @@ cluster. Additional notes for the associated upgrade playbooks are provided in their respective directories. # Upgrades available -- [OpenShift Enterprise 3.4 to 3.5](v3_5/README.md) (works also to upgrade OpenShift origin from 1.4.x to 1.5.x) -- [OpenShift Enterprise 3.3 to 3.4](v3_4/README.md) (works also to upgrade OpenShift origin from 1.3.x to 1.4.x) +- [OpenShift Container Platform 3.5 to 3.6](v3_6/README.md) (works also to upgrade OpenShift Origin from 1.5.x to 3.6.x) +- [OpenShift Container Platform 3.4 to 3.5](v3_5/README.md) (works also to upgrade OpenShift Origin from 1.4.x to 1.5.x) +- [OpenShift Container Platform 3.3 to 3.4](v3_4/README.md) (works also to upgrade OpenShift Origin from 1.3.x to 1.4.x) diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/nuke_images.sh b/playbooks/byo/openshift-cluster/upgrades/docker/nuke_images.sh deleted file mode 120000 index d5d864b63..000000000 --- a/playbooks/byo/openshift-cluster/upgrades/docker/nuke_images.sh +++ /dev/null @@ -1 +0,0 @@ -../../../../common/openshift-cluster/upgrades/files/nuke_images.sh
\ No newline at end of file diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml index d5fd7c424..7f31e26e1 100644 --- a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml @@ -1,37 +1,5 @@ --- # Playbook to upgrade Docker to the max allowable version for an OpenShift cluster. -- name: Create initial host groups for localhost - hosts: localhost - connection: local - become: no - gather_facts: no - tags: - - always - tasks: - - include_vars: ../../cluster_hosts.yml - - name: Evaluate group l_oo_all_hosts - add_host: - name: "{{ item }}" - groups: l_oo_all_hosts - with_items: "{{ g_all_hosts | default([]) }}" - changed_when: False +- include: ../../initialize_groups.yml -- name: Create initial host groups for all hosts - hosts: l_oo_all_hosts - gather_facts: no - tags: - - always - tasks: - - include_vars: ../../cluster_hosts.yml - -- include: ../../../../common/openshift-cluster/evaluate_groups.yml - vars: - # Do not allow adding hosts during upgrade. - g_new_master_hosts: [] - g_new_node_hosts: [] - openshift_cluster_id: "{{ cluster_id | default('default') }}" - openshift_deployment_type: "{{ deployment_type }}" - -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - -- include: docker_upgrade.yml +- include: ../../../../common/openshift-cluster/upgrades/docker/docker_upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml b/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml index 106dcc12d..5bd5d64ab 100644 --- a/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml +++ b/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml @@ -1,26 +1,6 @@ --- -- name: Create initial host groups for localhost - hosts: localhost - connection: local - become: no - gather_facts: no - tags: - - always - tasks: - - include_vars: ../cluster_hosts.yml - - name: Evaluate group l_oo_all_hosts - add_host: - name: "{{ item }}" - groups: l_oo_all_hosts - with_items: "{{ g_all_hosts | default([]) }}" - changed_when: False +- include: ../initialize_groups.yml -- name: Create initial host groups for all hosts - hosts: l_oo_all_hosts - gather_facts: no - tags: - - always - tasks: - - include_vars: ../cluster_hosts.yml +- include: ../../../common/openshift-cluster/evaluate_groups.yml - include: ../../../common/openshift-cluster/upgrades/etcd/main.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml index b1510e062..697a18c4d 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml @@ -2,106 +2,6 @@ # # Full Control Plane + Nodes Upgrade # -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade +- include: ../../initialize_groups.yml -- name: Configure the upgrade target for the common upgrade tasks - hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" - -# Pre-upgrade - -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos and initialize facts on all hosts - hosts: 
oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/disable_excluder.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. 
-- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml - vars: - master_config_hook: "v3_3/master_config_upgrade.yml" - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml - vars: - node_config_hook: "v3_3/node_config_upgrade.yml" - -- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml index b61d9e58a..4d284c279 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml @@ -11,101 +11,6 @@ # # You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. # -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade +- include: ../../initialize_groups.yml -- name: Configure the upgrade target for the common upgrade tasks - hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" - -# Pre-upgrade -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on control plane hosts - hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/disable_excluder.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. 
- skip_docker_role: True - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_etcd_to_config - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml - vars: - master_config_hook: "v3_3/master_config_upgrade.yml" - -- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml index f0b2a2c75..180a2821f 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml @@ -4,103 +4,6 @@ # # Upgrades nodes only, but requires the control plane to have already been upgraded. # -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade +- include: ../../initialize_groups.yml -- name: Configure the upgrade target for the common upgrade tasks - hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" - -# Pre-upgrade -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on nodes - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - roles: - - openshift_repos - tags: - - pre_upgrade - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/disable_excluder.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting 
openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- name: Verify masters are already upgraded - hosts: oo_masters_to_config - tags: - - pre_upgrade - tasks: - - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." - when: openshift.common.version != openshift_version - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_nodes_to_upgrade - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml - vars: - node_config_hook: "v3_3/node_config_upgrade.yml" +- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml index 82a1d0935..8cce91b3f 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml @@ -2,104 +2,6 @@ # # Full Control Plane + Nodes Upgrade # -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade +- include: ../../initialize_groups.yml -- name: Configure the upgrade target for the common upgrade tasks - hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" - openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - -# Pre-upgrade - -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos and initialize facts on all hosts - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - 
openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/disable_excluder.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml - vars: - master_config_hook: "v3_4/master_config_upgrade.yml" - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml - -- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml index 7ae1b3e6e..8e5d0f5f9 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml @@ -11,101 +11,6 @@ # # You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. 
# -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade +- include: ../../initialize_groups.yml -- name: Configure the upgrade target for the common upgrade tasks - hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" - openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - -# Pre-upgrade -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on control plane hosts - hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/disable_excluder.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. 
-- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_etcd_to_config - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml - vars: - master_config_hook: "v3_4/master_config_upgrade.yml" - -- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml index ec63ea60e..d5329b858 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml @@ -4,101 +4,6 @@ # # Upgrades nodes only, but requires the control plane to have already been upgraded. # -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade +- include: ../../initialize_groups.yml -- name: Configure the upgrade target for the common upgrade tasks - hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" - openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" - -# Pre-upgrade -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on nodes - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - roles: - - openshift_repos - tags: - - pre_upgrade - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/disable_excluder.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- name: Verify masters are already upgraded - hosts: oo_masters_to_config - tags: - - pre_upgrade - tasks: - - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." 
- when: openshift.common.version != openshift_version - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_nodes_to_upgrade - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/roles b/playbooks/byo/openshift-cluster/upgrades/v3_5/roles deleted file mode 120000 index 6bc1a7aef..000000000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_5/roles +++ /dev/null @@ -1 +0,0 @@ -../../../../../roles
\ No newline at end of file diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml index 69cabcd33..f44d55ad2 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml @@ -2,104 +2,6 @@ # # Full Control Plane + Nodes Upgrade # -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade +- include: ../../initialize_groups.yml -- name: Configure the upgrade target for the common upgrade tasks - hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" - openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" - -# Pre-upgrade - -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos and initialize facts on all hosts - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/disable_excluder.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. 
-- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml - -- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml - -- include: ../../../../common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml index 719057d2b..2377713fa 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml @@ -11,101 +11,6 @@ # # You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. # -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade +- include: ../../initialize_groups.yml -# Configure the upgrade target for the common upgrade tasks: -- hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" - openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" - -# Pre-upgrade -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on control plane hosts - hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config - tags: - - pre_upgrade - roles: - - openshift_repos - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/disable_excluder.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. 
- skip_docker_role: True - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-master/validate_restart.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. -- name: Cleanup unused Docker images - hosts: oo_masters_to_config:oo_etcd_to_config - tasks: - - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml - -- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml - -- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml - -- include: ../../../../common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml +- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml index 259be6f8e..5b3f6ab06 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml @@ -4,101 +4,6 @@ # # Upgrades nodes only, but requires the control plane to have already been upgraded. 
# -- include: ../../../../common/openshift-cluster/upgrades/init.yml - tags: - - pre_upgrade +- include: ../../initialize_groups.yml -# Configure the upgrade target for the common upgrade tasks: -- hosts: l_oo_all_hosts - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" - openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" - -# Pre-upgrade -- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml - tags: - - pre_upgrade - -- name: Update repos on nodes - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config - roles: - - openshift_repos - tags: - - pre_upgrade - -- name: Set openshift_no_proxy_internal_hostnames - hosts: oo_masters_to_config:oo_nodes_to_upgrade - tags: - - pre_upgrade - tasks: - - set_fact: - openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] - | union(groups['oo_masters_to_config']) - | union(groups['oo_etcd_to_config'] | default([]))) - | oo_collect('openshift.common.hostname') | default([]) | join (',') - }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/disable_excluder.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml - tags: - - pre_upgrade - vars: - # Request specific openshift_release and let the openshift_version role handle converting this - # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if - # defined, and overriding the normal behavior of protecting the installed version - openshift_release: "{{ openshift_upgrade_target }}" - openshift_protect_installed_version: False - - # We skip the docker role at this point in upgrade to prevent - # unintended package, container, or config upgrades which trigger - # docker restarts. At this early stage of upgrade we can assume - # docker is configured and running. - skip_docker_role: True - -- name: Verify masters are already upgraded - hosts: oo_masters_to_config - tags: - - pre_upgrade - tasks: - - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." - when: openshift.common.version != openshift_version - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml - tags: - - pre_upgrade - -- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml - tags: - - pre_upgrade - -# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. - -# Separate step so we can execute in parallel and clear out anything unused -# before we get into the serialized upgrade process which will then remove -# remaining images if possible. 
-- name: Cleanup unused Docker images
-  hosts: oo_nodes_to_upgrade
-  tasks:
-  - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_6/README.md
new file mode 100644
index 000000000..797af671a
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/README.md
@@ -0,0 +1,20 @@
+# v3.6 Major and Minor Upgrade Playbook
+
+## Overview
+This playbook currently performs the following steps.
+
+ * Upgrade and restart master services
+ * Unschedule node
+ * Upgrade and restart docker
+ * Upgrade and restart node services
+ * Modifies the subset of the configuration necessary
+ * Applies the latest cluster policies
+ * Updates the default router if one exists
+ * Updates the default registry if one exists
+ * Updates image streams and quickstarts
+
+## Usage
+
+```
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
+```
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
new file mode 100644
index 000000000..40120b3e8
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -0,0 +1,7 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../../initialize_groups.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
new file mode 100644
index 000000000..408a4c631
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -0,0 +1,16 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../../initialize_groups.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
new file mode 100644
index 000000000..b5f42b804
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -0,0 +1,9 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../../initialize_groups.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_7/README.md
new file mode 100644
index 000000000..4bf53be81
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/README.md
@@ -0,0 +1,20 @@
+# v3.7 Major and Minor Upgrade Playbook
+
+## Overview
+This playbook currently performs the following steps.
+ + * Upgrade and restart master services + * Unschedule node + * Upgrade and restart docker + * Upgrade and restart node services + * Modifies the subset of the configuration necessary + * Applies the latest cluster policies + * Updates the default router if one exists + * Updates the default registry if one exists + * Updates image streams and quickstarts + +## Usage + +``` +ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml +``` diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml new file mode 100644 index 000000000..e41c29682 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml @@ -0,0 +1,7 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- include: ../../initialize_groups.yml + +- include: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml new file mode 100644 index 000000000..21e0fd815 --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml @@ -0,0 +1,16 @@ +--- +# +# Control Plane Upgrade Playbook +# +# Upgrades masters and Docker (only on standalone etcd hosts) +# +# This upgrade does not include: +# - node service running on masters +# - docker running on masters +# - node service running on dedicated nodes +# +# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. +# +- include: ../../initialize_groups.yml + +- include: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml new file mode 100644 index 000000000..0e09d996e --- /dev/null +++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml @@ -0,0 +1,9 @@ +--- +# +# Node Upgrade Playbook +# +# Upgrades nodes only, but requires the control plane to have already been upgraded. 
+# +- include: ../../initialize_groups.yml + +- include: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml diff --git a/playbooks/byo/openshift-etcd/config.yml b/playbooks/byo/openshift-etcd/config.yml new file mode 100644 index 000000000..1342bd60c --- /dev/null +++ b/playbooks/byo/openshift-etcd/config.yml @@ -0,0 +1,6 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-etcd/config.yml diff --git a/playbooks/byo/openshift-etcd/migrate.yml b/playbooks/byo/openshift-etcd/migrate.yml new file mode 100644 index 000000000..2dec2bef6 --- /dev/null +++ b/playbooks/byo/openshift-etcd/migrate.yml @@ -0,0 +1,6 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-etcd/migrate.yml diff --git a/playbooks/byo/openshift-etcd/restart.yml b/playbooks/byo/openshift-etcd/restart.yml index 6713f07e3..034bba4b4 100644 --- a/playbooks/byo/openshift-etcd/restart.yml +++ b/playbooks/byo/openshift-etcd/restart.yml @@ -1,8 +1,6 @@ --- +- include: ../openshift-cluster/initialize_groups.yml + - include: ../../common/openshift-cluster/std_include.yml - tags: - - always - include: ../../common/openshift-etcd/restart.yml - vars: - openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/byo/openshift-etcd/scaleup.yml b/playbooks/byo/openshift-etcd/scaleup.yml new file mode 100644 index 000000000..a2a5856a9 --- /dev/null +++ b/playbooks/byo/openshift-etcd/scaleup.yml @@ -0,0 +1,6 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-etcd/scaleup.yml diff --git a/playbooks/byo/openshift-glusterfs/README.md b/playbooks/byo/openshift-glusterfs/README.md new file mode 100644 index 000000000..f62aea229 --- /dev/null +++ b/playbooks/byo/openshift-glusterfs/README.md @@ -0,0 +1,98 @@ +# OpenShift GlusterFS Playbooks + +These playbooks are intended to enable the use of GlusterFS volumes by pods in +OpenShift. While they try to provide a sane set of defaults they do cover a +variety of scenarios and configurations, so read carefully. :) + +## Playbook: config.yml + +This is the main playbook that integrates GlusterFS into a new or existing +OpenShift cluster. It will also, if specified, configure a hosted Docker +registry with GlusterFS backend storage. + +This playbook requires the `glusterfs` group to exist in the Ansible inventory +file. The hosts in this group are the nodes of the GlusterFS cluster. + + * If this is a newly configured cluster each host must have a + `glusterfs_devices` variable defined, each of which must be a list of block + storage devices intended for use only by the GlusterFS cluster. If this is + also an external GlusterFS cluster, you must specify + `openshift_storage_glusterfs_is_native=False`. If the cluster is to be + managed by an external heketi service you must also specify + `openshift_storage_glusterfs_heketi_is_native=False` and + `openshift_storage_glusterfs_heketi_url=<URL>` with the URL to the heketi + service. 
All these variables are specified in `[OSEv3:vars]`.
+ * If this is an existing cluster you do not need to specify a list of block
+   devices but you must specify the following variables in `[OSEv3:vars]`:
+   * `openshift_storage_glusterfs_is_missing=False`
+   * `openshift_storage_glusterfs_heketi_is_missing=False`
+
+By default, pods for a native GlusterFS cluster will be created in the
+`default` namespace. To change this, specify
+`openshift_storage_glusterfs_namespace=<other namespace>` in `[OSEv3:vars]`.
+
+To configure the deployment of a Docker registry with GlusterFS backend
+storage, specify `openshift_hosted_registry_storage_kind=glusterfs` in
+`[OSEv3:vars]`. To create a separate GlusterFS cluster for use only by the
+registry, specify a `glusterfs_registry` group that is populated in the same
+way as the `glusterfs` group, with the nodes for the separate cluster. If no
+`glusterfs_registry` group is specified, the cluster defined by the `glusterfs`
+group will be used.
+
+To swap an existing hosted registry's backend storage for a GlusterFS volume,
+specify `openshift_hosted_registry_storage_glusterfs_swap=True`. To
+additionally copy any existing contents from an existing hosted registry,
+specify `openshift_hosted_registry_storage_glusterfs_swapcopy=True`.
+
+**NOTE:** For each namespace that is to have access to GlusterFS volumes, an
+Endpoints resource pointing to the GlusterFS cluster nodes and a corresponding
+Service resource must be created. If dynamic provisioning using StorageClasses
+is configured, these resources are created automatically in the namespaces that
+require them. This playbook also takes care of creating these resources in the
+namespaces used for deployment.
+
+An example of a minimal inventory file:
+```
+[OSEv3:children]
+masters
+nodes
+glusterfs
+
+[OSEv3:vars]
+ansible_ssh_user=root
+deployment_type=origin
+
+[masters]
+master
+
+[nodes]
+node0
+node1
+node2
+
+[glusterfs]
+node0 glusterfs_devices='[ "/dev/sdb" ]'
+node1 glusterfs_devices='[ "/dev/sdb", "/dev/sdc" ]'
+node2 glusterfs_devices='[ "/dev/sdd" ]'
+```
+
+## Playbook: registry.yml
+
+This playbook is intended for admins who want to deploy a hosted Docker
+registry with GlusterFS backend storage on an existing OpenShift cluster. It
+has all the same requirements and behaviors as `config.yml`.
+
+## Role: openshift_storage_glusterfs
+
+The bulk of the work is done by the `openshift_storage_glusterfs` role. This
+role can handle the deployment of GlusterFS (if it is to be hosted on the
+OpenShift cluster), the registration of GlusterFS nodes (hosted or standalone),
+and (if specified) integration as backend storage for a hosted Docker registry.
+
+See the documentation in the role's directory for further details.
+
+## Role: openshift_hosted
+
+The `openshift_hosted` role recognizes `glusterfs` as a possible storage
+backend for a hosted docker registry. It will also, if configured, handle the
+swap of an existing registry's backend storage to a GlusterFS volume.
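The new GlusterFS README above describes the external-cluster variables only in prose and shows an inventory for the native case. As a rough sketch of the other case it mentions — not part of the patch, with placeholder host names and a hypothetical heketi URL, and using only variables named in the README — an inventory for a pre-existing GlusterFS cluster managed by an external heketi service might look like:

```
[OSEv3:children]
masters
nodes
glusterfs

[OSEv3:vars]
ansible_ssh_user=root
deployment_type=origin
# GlusterFS runs outside OpenShift, so do not deploy it as pods
openshift_storage_glusterfs_is_native=False
# heketi is likewise external; point the playbooks at it (placeholder URL)
openshift_storage_glusterfs_heketi_is_native=False
openshift_storage_glusterfs_heketi_url=http://heketi.example.com:8080
# Cluster and heketi already exist, so nothing needs to be created
openshift_storage_glusterfs_is_missing=False
openshift_storage_glusterfs_heketi_is_missing=False

[masters]
master

[nodes]
node0
node1
node2

[glusterfs]
# Nodes of the existing GlusterFS cluster; per the README,
# no glusterfs_devices lists are required for an existing cluster
node0
node1
node2
```

Per the README, the `*_is_missing=False` variables take the place of the `glusterfs_devices` lists used in the newly-configured-cluster example.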
diff --git a/playbooks/byo/openshift-glusterfs/config.yml b/playbooks/byo/openshift-glusterfs/config.yml new file mode 100644 index 000000000..3f11f3991 --- /dev/null +++ b/playbooks/byo/openshift-glusterfs/config.yml @@ -0,0 +1,10 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + tags: + - always + +- include: ../../common/openshift-cluster/std_include.yml + tags: + - always + +- include: ../../common/openshift-glusterfs/config.yml diff --git a/playbooks/gce/openshift-cluster/filter_plugins b/playbooks/byo/openshift-glusterfs/filter_plugins index 99a95e4ca..99a95e4ca 120000 --- a/playbooks/gce/openshift-cluster/filter_plugins +++ b/playbooks/byo/openshift-glusterfs/filter_plugins diff --git a/playbooks/gce/openshift-cluster/lookup_plugins b/playbooks/byo/openshift-glusterfs/lookup_plugins index ac79701db..ac79701db 120000 --- a/playbooks/gce/openshift-cluster/lookup_plugins +++ b/playbooks/byo/openshift-glusterfs/lookup_plugins diff --git a/playbooks/byo/openshift-glusterfs/registry.yml b/playbooks/byo/openshift-glusterfs/registry.yml new file mode 100644 index 000000000..6ee6febdb --- /dev/null +++ b/playbooks/byo/openshift-glusterfs/registry.yml @@ -0,0 +1,10 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + tags: + - always + +- include: ../../common/openshift-cluster/std_include.yml + tags: + - always + +- include: ../../common/openshift-glusterfs/registry.yml diff --git a/playbooks/byo/openshift-preflight/roles b/playbooks/byo/openshift-glusterfs/roles index 20c4c58cf..20c4c58cf 120000 --- a/playbooks/byo/openshift-preflight/roles +++ b/playbooks/byo/openshift-glusterfs/roles diff --git a/playbooks/byo/openshift-master/config.yml b/playbooks/byo/openshift-master/config.yml new file mode 100644 index 000000000..98be0c448 --- /dev/null +++ b/playbooks/byo/openshift-master/config.yml @@ -0,0 +1,6 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-master/config.yml diff --git a/playbooks/byo/openshift-master/restart.yml b/playbooks/byo/openshift-master/restart.yml index 2d20f69f4..8950efd00 100644 --- a/playbooks/byo/openshift-master/restart.yml +++ b/playbooks/byo/openshift-master/restart.yml @@ -1,8 +1,6 @@ --- +- include: ../openshift-cluster/initialize_groups.yml + - include: ../../common/openshift-cluster/std_include.yml - tags: - - always - include: ../../common/openshift-master/restart.yml - vars: - openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/byo/openshift-master/scaleup.yml b/playbooks/byo/openshift-master/scaleup.yml index 7075bb59e..e3ef704e5 100644 --- a/playbooks/byo/openshift-master/scaleup.yml +++ b/playbooks/byo/openshift-master/scaleup.yml @@ -1,30 +1,23 @@ --- -- name: Create initial host groups for localhost +- include: ../openshift-cluster/initialize_groups.yml + +- name: Ensure there are new_masters hosts: localhost connection: local become: no gather_facts: no - tags: - - always tasks: - - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml - - name: Evaluate group l_oo_all_hosts - add_host: - name: "{{ item }}" - groups: l_oo_all_hosts - with_items: "{{ g_all_hosts | default([]) }}" - changed_when: False + - fail: + msg: > + Detected no new_masters or no new_nodes in inventory. Please + add hosts to the new_masters and new_nodes host groups to add + masters. 
+ when: + - (g_new_master_hosts | default([]) | length == 0) or (g_new_node_hosts | default([]) | length == 0) -- name: Create initial host groups for all hosts - hosts: l_oo_all_hosts - gather_facts: no - tags: - - always - tasks: - - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml +- include: ../../common/openshift-cluster/std_include.yml - include: ../../common/openshift-master/scaleup.yml vars: openshift_cluster_id: "{{ cluster_id | default('default') }}" openshift_debug_level: "{{ debug_level | default(2) }}" - openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/byo/openshift-node/config.yml b/playbooks/byo/openshift-node/config.yml new file mode 100644 index 000000000..839dc36ff --- /dev/null +++ b/playbooks/byo/openshift-node/config.yml @@ -0,0 +1,6 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml + +- include: ../../common/openshift-cluster/std_include.yml + +- include: ../../common/openshift-node/config.yml diff --git a/playbooks/byo/openshift-node/network_manager.yml b/playbooks/byo/openshift-node/network_manager.yml index 9bb3ea17f..b23692237 100644 --- a/playbooks/byo/openshift-node/network_manager.yml +++ b/playbooks/byo/openshift-node/network_manager.yml @@ -1,42 +1,4 @@ --- -- name: Create initial host groups for localhost - hosts: localhost - connection: local - become: no - gather_facts: no - tags: - - always - tasks: - - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml - - name: Evaluate group l_oo_all_hosts - add_host: - name: "{{ item }}" - groups: l_oo_all_hosts - with_items: "{{ g_all_hosts | default([]) }}" - changed_when: False +- include: ../openshift-cluster/initialize_groups.yml -- name: Install and configure NetworkManager - hosts: l_oo_all_hosts - become: yes - tasks: - - name: install NetworkManager - package: - name: 'NetworkManager' - state: present - - - name: configure NetworkManager - lineinfile: - dest: "/etc/sysconfig/network-scripts/ifcfg-{{ ansible_default_ipv4['interface'] }}" - regexp: '^{{ item }}=' - line: '{{ item }}=yes' - state: present - create: yes - with_items: - - 'USE_PEERDNS' - - 'NM_CONTROLLED' - - - name: enable and start NetworkManager - service: - name: 'NetworkManager' - state: started - enabled: yes +- include: ../../common/openshift-node/network_manager.yml diff --git a/playbooks/byo/openshift-node/restart.yml b/playbooks/byo/openshift-node/restart.yml index 3985a83bb..ccf9e82da 100644 --- a/playbooks/byo/openshift-node/restart.yml +++ b/playbooks/byo/openshift-node/restart.yml @@ -1,8 +1,6 @@ --- +- include: ../openshift-cluster/initialize_groups.yml + - include: ../../common/openshift-cluster/std_include.yml - tags: - - always - include: ../../common/openshift-node/restart.yml - vars: - openshift_deployment_type: "{{ deployment_type }}" diff --git a/playbooks/byo/openshift-node/scaleup.yml b/playbooks/byo/openshift-node/scaleup.yml index 2b10b6c76..e0c36fb69 100644 --- a/playbooks/byo/openshift-node/scaleup.yml +++ b/playbooks/byo/openshift-node/scaleup.yml @@ -1,32 +1,19 @@ --- -- name: Create initial host groups for localhost +- include: ../openshift-cluster/initialize_groups.yml + +- name: Ensure there are new_nodes hosts: localhost connection: local become: no gather_facts: no - tags: - - always tasks: - - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml - - name: Evaluate group l_oo_all_hosts - add_host: - name: "{{ item }}" - groups: l_oo_all_hosts - with_items: "{{ g_all_hosts | default([]) }}" - changed_when: False + - fail: + msg: > + Detected no 
new_nodes in inventory. Please add hosts to the + new_nodes host group to add nodes. + when: + - g_new_node_hosts | default([]) | length == 0 -- name: Create initial host groups for all hosts - hosts: l_oo_all_hosts - gather_facts: no - tags: - - always - tasks: - - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml +- include: ../../common/openshift-cluster/std_include.yml -- include: ../../common/openshift-node/scaleup.yml - vars: - openshift_cluster_id: "{{ cluster_id | default('default') }}" - openshift_debug_level: "{{ debug_level | default(2) }}" - openshift_deployment_type: "{{ deployment_type }}" - openshift_master_etcd_hosts: "{{ groups.etcd | default([]) }}" - openshift_master_etcd_port: 2379 +- include: ../../common/openshift-node/config.yml diff --git a/playbooks/byo/openshift-preflight/README.md b/playbooks/byo/openshift-preflight/README.md deleted file mode 100644 index b50292eac..000000000 --- a/playbooks/byo/openshift-preflight/README.md +++ /dev/null @@ -1,43 +0,0 @@ -# OpenShift preflight checks - -Here we provide an Ansible playbook for detecting potential roadblocks prior to -an install or upgrade. - -Ansible's default operation mode is to fail fast, on the first error. However, -when performing checks, it is useful to gather as much information about -problems as possible in a single run. - -The `check.yml` playbook runs a battery of checks against the inventory hosts -and tells Ansible to ignore intermediate errors, thus giving a more complete -diagnostic of the state of each host. Still, if any check failed, the playbook -run will be marked as having failed. - -To facilitate understanding the problems that were encountered, we provide a -custom callback plugin to summarize execution errors at the end of a playbook -run. - ---- - -*Note that currently the `check.yml` playbook is only useful for RPM-based -installations. Containerized installs are excluded from checks for now, but -might be included in the future if there is demand for that.* - ---- - -## Running - -With an installation of Ansible 2.2 or greater, run the playbook directly -against your inventory file. Here is the step-by-step: - -1. If you haven't done it yet, clone this repository: - - ```console - $ git clone https://github.com/openshift/openshift-ansible - $ cd openshift-ansible - ``` - -2. Run the playbook: - - ```console - $ ansible-playbook -i <inventory file> playbooks/byo/openshift-preflight/check.yml - ``` diff --git a/playbooks/byo/openshift-preflight/check.yml b/playbooks/byo/openshift-preflight/check.yml index c5f05d0f0..2e53452a6 100644 --- a/playbooks/byo/openshift-preflight/check.yml +++ b/playbooks/byo/openshift-preflight/check.yml @@ -1,12 +1,3 @@ --- -- hosts: OSEv3 - name: run OpenShift health checks - roles: - - openshift_health_checker - post_tasks: - # NOTE: we need to use the old "action: name" syntax until - # https://github.com/ansible/ansible/issues/20513 is fixed. 
- - action: openshift_health_check - args: - checks: - - '@preflight' +# location is moved; this file remains so existing instructions keep working +- include: ../openshift-checks/pre-install.yml diff --git a/playbooks/byo/openshift_facts.yml b/playbooks/byo/openshift_facts.yml index a21b6a0a5..a8c1c3a88 100644 --- a/playbooks/byo/openshift_facts.yml +++ b/playbooks/byo/openshift_facts.yml @@ -1,9 +1,14 @@ --- +- include: openshift-cluster/initialize_groups.yml + tags: + - always + - include: ../common/openshift-cluster/std_include.yml tags: - always - name: Gather Cluster facts + # Temporarily reverting to OSEv3 until group standardization is complete hosts: OSEv3 roles: - openshift_facts diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml index 65c0b1c01..1b14ff32e 100644 --- a/playbooks/byo/rhel_subscribe.yml +++ b/playbooks/byo/rhel_subscribe.yml @@ -1,12 +1,11 @@ --- -- include: ../common/openshift-cluster/std_include.yml +- include: openshift-cluster/initialize_groups.yml tags: - always - name: Subscribe hosts, update repos and update OS packages - hosts: l_oo_all_hosts - vars: - openshift_deployment_type: "{{ deployment_type }}" + # Temporarily reverting to OSEv3 until group standardization is complete + hosts: OSEv3 roles: - role: rhel_subscribe when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and diff --git a/playbooks/byo/vagrant.yml b/playbooks/byo/vagrant.yml deleted file mode 100644 index 76246e7b0..000000000 --- a/playbooks/byo/vagrant.yml +++ /dev/null @@ -1,4 +0,0 @@ ---- -- include: rhel_subscribe.yml - -- include: config.yml diff --git a/playbooks/certificate_expiry b/playbooks/certificate_expiry new file mode 120000 index 000000000..9cf5334a1 --- /dev/null +++ b/playbooks/certificate_expiry @@ -0,0 +1 @@ +byo/openshift-checks/certificate_expiry/
\ No newline at end of file diff --git a/playbooks/certificate_expiry/roles b/playbooks/certificate_expiry/roles deleted file mode 120000 index b741aa3db..000000000 --- a/playbooks/certificate_expiry/roles +++ /dev/null @@ -1 +0,0 @@ -../../roles
\ No newline at end of file diff --git a/playbooks/common/README.md b/playbooks/common/README.md index 0b5e26989..968bd99cb 100644 --- a/playbooks/common/README.md +++ b/playbooks/common/README.md @@ -1,9 +1,8 @@ # Common playbooks This directory has a generic set of playbooks that are included by playbooks in -[`byo`](../byo), as well as other playbooks related to the -[`bin/cluster`](../../bin) tool. +[`byo`](../byo). Note: playbooks in this directory use generic group names that do not line up -with the groups used by the `byo` playbooks or `bin/cluster` derived playbooks, -requiring an explicit remapping of groups. +with the groups used by the `byo` playbooks, requiring an explicit remapping of +groups. diff --git a/playbooks/common/openshift-cfme/config.yml b/playbooks/common/openshift-cfme/config.yml new file mode 100644 index 000000000..533a35d9e --- /dev/null +++ b/playbooks/common/openshift-cfme/config.yml @@ -0,0 +1,44 @@ +--- +# TODO: Make this work. The 'name' variable below is undefined +# presently because it's part of the cfme role. This play can't run +# until that's re-worked. +# +# - name: Pre-Pull manageiq-pods docker images +# hosts: nodes +# tasks: +# - name: Ensure the latest manageiq-pods docker image is pulling +# docker_image: +# name: "{{ openshift_cfme_container_image }}" +# # Fire-and-forget method, never timeout +# async: 99999999999 +# # F-a-f, never check on this. True 'background' task. +# poll: 0 + +- name: Configure Masters for CFME Bulk Image Imports + hosts: oo_masters_to_config + serial: 1 + tasks: + - name: Run master cfme tuning playbook + include_role: + name: openshift_cfme + tasks_from: tune_masters + +- name: Setup CFME + hosts: oo_first_master + vars: + r_openshift_cfme_miq_template_content: "{{ lookup('file', 'roles/openshift_cfme/files/miq-template.yaml') | from_yaml}}" + pre_tasks: + - name: Create a temporary place to evaluate the PV templates + command: mktemp -d /tmp/openshift-ansible-XXXXXXX + register: r_openshift_cfme_mktemp + changed_when: false + - name: Ensure the server template was read from disk + debug: + msg="{{ r_openshift_cfme_miq_template_content | from_yaml }}" + + tasks: + - name: Run the CFME Setup Role + include_role: + name: openshift_cfme + vars: + template_dir: "{{ hostvars[groups.masters.0].r_openshift_cfme_mktemp.stdout }}" diff --git a/playbooks/libvirt/openshift-cluster/filter_plugins b/playbooks/common/openshift-cfme/filter_plugins index 99a95e4ca..99a95e4ca 120000 --- a/playbooks/libvirt/openshift-cluster/filter_plugins +++ b/playbooks/common/openshift-cfme/filter_plugins diff --git a/playbooks/common/openshift-cfme/library b/playbooks/common/openshift-cfme/library new file mode 120000 index 000000000..ba40d2f56 --- /dev/null +++ b/playbooks/common/openshift-cfme/library @@ -0,0 +1 @@ +../../../library
\ No newline at end of file diff --git a/playbooks/gce/openshift-cluster/roles b/playbooks/common/openshift-cfme/roles index 20c4c58cf..20c4c58cf 120000 --- a/playbooks/gce/openshift-cluster/roles +++ b/playbooks/common/openshift-cfme/roles diff --git a/playbooks/common/openshift-cfme/uninstall.yml b/playbooks/common/openshift-cfme/uninstall.yml new file mode 100644 index 000000000..78b8e7668 --- /dev/null +++ b/playbooks/common/openshift-cfme/uninstall.yml @@ -0,0 +1,8 @@ +--- +- name: Uninstall CFME + hosts: masters + tasks: + - name: Run the CFME Uninstall Role Tasks + include_role: + name: openshift_cfme + tasks_from: uninstall diff --git a/playbooks/common/openshift-checks/adhoc.yml b/playbooks/common/openshift-checks/adhoc.yml new file mode 100644 index 000000000..dfcef8435 --- /dev/null +++ b/playbooks/common/openshift-checks/adhoc.yml @@ -0,0 +1,12 @@ +--- +- name: OpenShift health checks + hosts: oo_all_hosts + roles: + - openshift_health_checker + vars: + - r_openshift_health_checker_playbook_context: adhoc + post_tasks: + - name: Run health checks + action: openshift_health_check + args: + checks: '{{ openshift_checks | default([]) }}' diff --git a/playbooks/common/openshift-checks/health.yml b/playbooks/common/openshift-checks/health.yml new file mode 100644 index 000000000..21ea785ef --- /dev/null +++ b/playbooks/common/openshift-checks/health.yml @@ -0,0 +1,11 @@ +--- +- name: Run OpenShift health checks + hosts: oo_all_hosts + roles: + - openshift_health_checker + vars: + - r_openshift_health_checker_playbook_context: health + post_tasks: + - action: openshift_health_check + args: + checks: ['@health'] diff --git a/playbooks/common/openshift-checks/pre-install.yml b/playbooks/common/openshift-checks/pre-install.yml new file mode 100644 index 000000000..88e6f9120 --- /dev/null +++ b/playbooks/common/openshift-checks/pre-install.yml @@ -0,0 +1,11 @@ +--- +- name: run OpenShift pre-install checks + hosts: oo_all_hosts + roles: + - openshift_health_checker + vars: + - r_openshift_health_checker_playbook_context: pre-install + post_tasks: + - action: openshift_health_check + args: + checks: ['@preflight'] diff --git a/playbooks/libvirt/openshift-cluster/roles b/playbooks/common/openshift-checks/roles index 20c4c58cf..20c4c58cf 120000 --- a/playbooks/libvirt/openshift-cluster/roles +++ b/playbooks/common/openshift-checks/roles diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index 113b401f9..5f420a76c 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -1,38 +1,28 @@ --- -- name: Set oo_option facts +# TODO: refactor this into its own include +# and pass a variable for ctx +- name: Verify Requirements hosts: oo_all_hosts - tags: - - always - tasks: - - set_fact: - openshift_docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') }}" - when: openshift_docker_additional_registries is not defined - - set_fact: - openshift_docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') }}" - when: openshift_docker_insecure_registries is not defined - - set_fact: - openshift_docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') }}" - when: openshift_docker_blocked_registries is not defined - - set_fact: - openshift_docker_options: "{{ lookup('oo_option', 'docker_options') }}" - when: openshift_docker_options is not defined - - set_fact: - openshift_docker_log_driver: "{{ lookup('oo_option', 
'docker_log_driver') }}" - when: openshift_docker_log_driver is not defined - - set_fact: - openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}" - when: openshift_docker_log_options is not defined - - set_fact: - openshift_docker_selinux_enabled: "{{ lookup('oo_option', 'docker_selinux_enabled') }}" - when: openshift_docker_selinux_enabled is not defined - -- include: disable_excluder.yml + roles: + - openshift_health_checker + vars: + - r_openshift_health_checker_playbook_context: install + post_tasks: + - action: openshift_health_check + args: + checks: + - disk_availability + - memory_availability + - package_availability + - package_version + - docker_image_availability + - docker_storage + +- include: initialize_oo_option_facts.yml tags: - always - include: ../openshift-etcd/config.yml - tags: - - etcd - include: ../openshift-nfs/config.yml tags: @@ -43,17 +33,21 @@ - loadbalancer - include: ../openshift-master/config.yml - tags: - - master - -- include: additional_config.yml - tags: - - master - include: ../openshift-node/config.yml tags: - node +- include: ../openshift-glusterfs/config.yml + tags: + - glusterfs + - include: openshift_hosted.yml tags: - hosted + +- include: service_catalog.yml + when: + - openshift_enable_service_catalog | default(false) | bool + tags: + - servicecatalog diff --git a/playbooks/common/openshift-cluster/disable_excluder.yml b/playbooks/common/openshift-cluster/disable_excluder.yml deleted file mode 100644 index eb146bab8..000000000 --- a/playbooks/common/openshift-cluster/disable_excluder.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -- name: Record excluder state and disable - hosts: l_oo_all_hosts - gather_facts: no - tasks: - - include_role: - name: openshift_excluder - tasks_from: status - - include_role: - name: openshift_excluder - tasks_from: unexclude diff --git a/playbooks/common/openshift-cluster/enable_dnsmasq.yml b/playbooks/common/openshift-cluster/enable_dnsmasq.yml index ca5177852..50351588f 100644 --- a/playbooks/common/openshift-cluster/enable_dnsmasq.yml +++ b/playbooks/common/openshift-cluster/enable_dnsmasq.yml @@ -37,7 +37,7 @@ dest: "{{ openshift.common.config_base }}/master/master-config.yaml" yaml_key: dnsConfig.bindAddress yaml_value: "{{ openshift.master.bind_addr }}:{{ openshift.master.dns_port }}" - notify: restart master + notify: restart master api - meta: flush_handlers - name: Configure nodes for dnsmasq @@ -56,8 +56,6 @@ - role: node local_facts: dns_ip: "{{ hostvars[inventory_hostname]['ansible_default_ipv4']['address'] }}" - vars: - openshift_deployment_type: "{{ deployment_type }}" roles: - openshift_node_dnsmasq post_tasks: diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml index 45a4875a3..c9f37109b 100644 --- a/playbooks/common/openshift-cluster/evaluate_groups.yml +++ b/playbooks/common/openshift-cluster/evaluate_groups.yml @@ -5,29 +5,49 @@ become: no gather_facts: no tasks: - - fail: - msg: This playbook requires g_etcd_hosts to be set - when: "{{ g_etcd_hosts is not defined }}" + - name: Evaluate groups - g_etcd_hosts or g_new_etcd_hosts required + fail: + msg: This playbook requires g_etcd_hosts or g_new_etcd_hosts to be set + when: g_etcd_hosts is not defined and g_new_etcd_hosts is not defined - - fail: + - name: Evaluate groups - g_master_hosts or g_new_master_hosts required + fail: msg: This playbook requires g_master_hosts or g_new_master_hosts to be set - when: "{{ g_master_hosts is not defined and 
g_new_master_hosts is not defined }}" + when: g_master_hosts is not defined and g_new_master_hosts is not defined - - fail: + - name: Evaluate groups - g_node_hosts or g_new_node_hosts required + fail: msg: This playbook requires g_node_hosts or g_new_node_hosts to be set - when: "{{ g_node_hosts is not defined and g_new_node_hosts is not defined }}" + when: g_node_hosts is not defined and g_new_node_hosts is not defined - - fail: + - name: Evaluate groups - g_lb_hosts required + fail: msg: This playbook requires g_lb_hosts to be set - when: "{{ g_lb_hosts is not defined }}" + when: g_lb_hosts is not defined - - fail: + - name: Evaluate groups - g_nfs_hosts required + fail: msg: This playbook requires g_nfs_hosts to be set - when: "{{ g_nfs_hosts is not defined }}" + when: g_nfs_hosts is not defined - - fail: + - name: Evaluate groups - g_nfs_hosts is single host + fail: msg: The nfs group must be limited to one host - when: "{{ (groups[g_nfs_hosts] | default([])) | length > 1 }}" + when: g_nfs_hosts | default([]) | length > 1 + + - name: Evaluate groups - g_glusterfs_hosts required + fail: + msg: This playbook requires g_glusterfs_hosts to be set + when: g_glusterfs_hosts is not defined + + - name: Evaluate groups - Fail if no etcd hosts group is defined + fail: + msg: > + No etcd hosts defined. Running an all-in-one master is deprecated and + will no longer be supported in a future upgrade. + when: + - g_etcd_hosts | default([]) | length == 0 + - not openshift_master_unsupported_all_in_one | default(False) - name: Evaluate oo_all_hosts add_host: @@ -47,13 +67,22 @@ with_items: "{{ g_master_hosts | union(g_new_master_hosts) | default([]) }}" changed_when: no - - name: Evaluate oo_etcd_to_config + - name: Evaluate oo_first_master + add_host: + name: "{{ g_master_hosts[0] }}" + groups: oo_first_master + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_become: "{{ g_sudo | default(omit) }}" + when: g_master_hosts|length > 0 + changed_when: no + + - name: Evaluate oo_new_etcd_to_config add_host: name: "{{ item }}" - groups: oo_etcd_to_config + groups: oo_new_etcd_to_config ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_become: "{{ g_sudo | default(omit) }}" - with_items: "{{ g_etcd_hosts | default([]) }}" + with_items: "{{ g_new_etcd_hosts | default([]) }}" changed_when: no - name: Evaluate oo_masters_to_config @@ -65,6 +94,41 @@ with_items: "{{ g_new_master_hosts | default(g_master_hosts | default([], true), true) }}" changed_when: no + - name: Evaluate oo_etcd_to_config + add_host: + name: "{{ item }}" + groups: oo_etcd_to_config + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_become: "{{ g_sudo | default(omit) }}" + with_items: "{{ g_etcd_hosts | default([]) }}" + changed_when: no + + - name: Evaluate oo_first_etcd + add_host: + name: "{{ g_etcd_hosts[0] }}" + groups: oo_first_etcd + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_become: "{{ g_sudo | default(omit) }}" + when: g_etcd_hosts|length > 0 + changed_when: no + + # We use two groups one for hosts we're upgrading which doesn't include embedded etcd + # The other for backing up which includes the embedded etcd host, there's no need to + # upgrade embedded etcd that just happens when the master is updated. 
+ - name: Evaluate oo_etcd_hosts_to_upgrade + add_host: + name: "{{ item }}" + groups: oo_etcd_hosts_to_upgrade + with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else [] }}" + changed_when: False + + - name: Evaluate oo_etcd_hosts_to_backup + add_host: + name: "{{ item }}" + groups: oo_etcd_hosts_to_backup + with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else (groups.oo_first_master | default([])) }}" + changed_when: False + - name: Evaluate oo_nodes_to_config add_host: name: "{{ item }}" @@ -82,40 +146,41 @@ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_become: "{{ g_sudo | default(omit) }}" with_items: "{{ g_master_hosts | default([]) }}" - when: "{{ g_nodeonmaster | default(false) | bool and not g_new_node_hosts | default(false) | bool }}" + when: g_nodeonmaster | default(false) | bool and not g_new_node_hosts | default(false) | bool changed_when: no - - name: Evaluate oo_first_etcd + - name: Evaluate oo_lb_to_config add_host: - name: "{{ g_etcd_hosts[0] }}" - groups: oo_first_etcd + name: "{{ item }}" + groups: oo_lb_to_config ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - when: "{{ g_etcd_hosts|length > 0 }}" + ansible_become: "{{ g_sudo | default(omit) }}" + with_items: "{{ g_lb_hosts | default([]) }}" changed_when: no - - name: Evaluate oo_first_master + - name: Evaluate oo_nfs_to_config add_host: - name: "{{ g_master_hosts[0] }}" - groups: oo_first_master + name: "{{ item }}" + groups: oo_nfs_to_config ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_become: "{{ g_sudo | default(omit) }}" - when: "{{ g_master_hosts|length > 0 }}" + with_items: "{{ g_nfs_hosts | default([]) }}" changed_when: no - - name: Evaluate oo_lb_to_config + - name: Evaluate oo_glusterfs_to_config add_host: name: "{{ item }}" - groups: oo_lb_to_config + groups: oo_glusterfs_to_config ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_become: "{{ g_sudo | default(omit) }}" - with_items: "{{ g_lb_hosts | default([]) }}" + with_items: "{{ g_glusterfs_hosts | union(g_glusterfs_registry_hosts | default([])) }}" changed_when: no - - name: Evaluate oo_nfs_to_config + - name: Evaluate oo_etcd_to_migrate add_host: name: "{{ item }}" - groups: oo_nfs_to_config + groups: oo_etcd_to_migrate ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_become: "{{ g_sudo | default(omit) }}" - with_items: "{{ g_nfs_hosts | default([]) }}" + with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config | default([]) | length != 0 else (groups.oo_first_master |default([]))}}" changed_when: no diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml index 18f99728c..9eaf3bc34 100644 --- a/playbooks/common/openshift-cluster/initialize_facts.yml +++ b/playbooks/common/openshift-cluster/initialize_facts.yml @@ -6,14 +6,155 @@ - name: Initialize host facts hosts: oo_all_hosts - roles: - - openshift_facts tasks: - - openshift_facts: + - name: load openshift_facts module + include_role: + name: openshift_facts + + # TODO: Should this role be refactored into health_checks?? 
+ - name: Run openshift_sanitize_inventory to set variables + include_role: + name: openshift_sanitize_inventory + + - name: Detecting Operating System from ostree_booted + stat: + path: /run/ostree-booted + register: ostree_booted + + # Locally setup containerized facts for now + - name: initialize_facts set fact l_is_atomic + set_fact: + l_is_atomic: "{{ ostree_booted.stat.exists }}" + + - name: initialize_facts set fact for containerized and l_is_*_system_container + set_fact: + l_is_containerized: "{{ (l_is_atomic | bool) or (containerized | default(false) | bool) }}" + l_is_openvswitch_system_container: "{{ (openshift_use_openvswitch_system_container | default(openshift_use_system_containers | default(false)) | bool) }}" + l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}" + l_is_master_system_container: "{{ (openshift_use_master_system_container | default(openshift_use_system_containers | default(false)) | bool) }}" + l_is_etcd_system_container: "{{ (openshift_use_etcd_system_container | default(openshift_use_system_containers | default(false)) | bool) }}" + + - name: initialize_facts set facts for l_any_system_container + set_fact: + l_any_system_container: "{{ l_is_etcd_system_container or l_is_openvswitch_system_container or l_is_node_system_container or l_is_master_system_container }}" + + - name: initialize_facts set fact for l_etcd_runtime + set_fact: + l_etcd_runtime: "{{ 'runc' if l_is_etcd_system_container else 'docker' if l_is_containerized else 'host' }}" + + # TODO: Should this be moved into health checks?? + # Seems as though any check that happens with a corresponding fail should move into health_checks + - name: Validate python version - ans_dist is fedora and python is v3 + fail: + msg: | + openshift-ansible requires Python 3 for {{ ansible_distribution }}; + For information on enabling Python 3 with Ansible, see https://docs.ansible.com/ansible/python_3_support.html + when: + - ansible_distribution == 'Fedora' + - ansible_python['version']['major'] != 3 + + # TODO: Should this be moved into health checks?? + # Seems as though any check that happens with a corresponding fail should move into health_checks + - name: Validate python version - ans_dist not Fedora and python must be v2 + fail: + msg: "openshift-ansible requires Python 2 for {{ ansible_distribution }}" + when: + - ansible_distribution != 'Fedora' + - ansible_python['version']['major'] != 2 + + # TODO: Should this be moved into health checks?? + # Seems as though any check that happens with a corresponding fail should move into health_checks + # Fail as early as possible if Atomic and old version of Docker + - when: + - l_is_atomic | bool + block: + + # See https://access.redhat.com/articles/2317361 + # and https://github.com/ansible/ansible/issues/15892 + # NOTE: the "'s can not be removed at this level else the docker command will fail + # NOTE: When ansible >2.2.1.x is used this can be updated per + # https://github.com/openshift/openshift-ansible/pull/3475#discussion_r103525121 + - name: Determine Atomic Host Docker Version + shell: 'CURLY="{"; docker version --format "$CURLY{json .Server.Version}}"' + register: l_atomic_docker_version + + - name: assert atomic host docker version is 1.12 or later + assert: + that: + - l_atomic_docker_version.stdout | replace('"', '') | version_compare('1.12','>=') + msg: Installation on Atomic Host requires Docker 1.12 or later. Please upgrade and restart the Atomic Host. 
+ + - when: + - not l_is_atomic | bool + block: + - name: Ensure openshift-ansible installer package deps are installed + package: + name: "{{ item }}" + state: present + with_items: + - iproute + - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'python-dbus' }}" + - PyYAML + - yum-utils + + - name: Ensure various deps for running system containers are installed + package: + name: "{{ item }}" + state: present + with_items: + - atomic + - ostree + - runc + when: + - l_any_system_container | bool + + - name: Default system_images_registry to an enterprise registry + set_fact: + system_images_registry: "registry.access.redhat.com" + when: + - system_images_registry is not defined + - openshift_deployment_type == "openshift-enterprise" + + - name: Default system_images_registry to community registry + set_fact: + system_images_registry: "docker.io" + when: + - system_images_registry is not defined + - openshift_deployment_type == "origin" + + - name: Gather Cluster facts and set is_containerized if needed + openshift_facts: role: common local_facts: + debug_level: "{{ openshift_debug_level | default(2) }}" + deployment_type: "{{ openshift_deployment_type }}" + deployment_subtype: "{{ openshift_deployment_subtype | default(None) }}" + cli_image: "{{ osm_image | default(None) }}" + cluster_id: "{{ openshift_cluster_id | default('default') }}" hostname: "{{ openshift_hostname | default(None) }}" - - set_fact: - openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" - - set_fact: - openshift_deployment_type: "{{ deployment_type }}" + ip: "{{ openshift_ip | default(None) }}" + is_containerized: "{{ l_is_containerized | default(None) }}" + is_openvswitch_system_container: "{{ l_is_openvswitch_system_container | default(false) }}" + is_node_system_container: "{{ l_is_node_system_container | default(false) }}" + is_master_system_container: "{{ l_is_master_system_container | default(false) }}" + is_etcd_system_container: "{{ l_is_etcd_system_container | default(false) }}" + etcd_runtime: "{{ l_etcd_runtime }}" + system_images_registry: "{{ system_images_registry }}" + public_hostname: "{{ openshift_public_hostname | default(None) }}" + public_ip: "{{ openshift_public_ip | default(None) }}" + portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}" + http_proxy: "{{ openshift_http_proxy | default(None) }}" + https_proxy: "{{ openshift_https_proxy | default(None) }}" + no_proxy: "{{ openshift_no_proxy | default(None) }}" + generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}" + no_proxy_internal_hostnames: "{{ openshift_no_proxy_internal_hostnames | default(None) }}" + sdn_network_plugin_name: "{{ os_sdn_network_plugin_name | default(None) }}" + use_openshift_sdn: "{{ openshift_use_openshift_sdn | default(None) }}" + + - name: initialize_facts set_fact repoquery command + set_fact: + repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}" + + - name: initialize_facts set_fact on openshift_docker_hosted_registry_network + set_fact: + openshift_docker_hosted_registry_network: "{{ '' if 'oo_first_master' not in groups else hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" diff --git a/playbooks/common/openshift-cluster/initialize_oo_option_facts.yml b/playbooks/common/openshift-cluster/initialize_oo_option_facts.yml new file mode 100644 index 000000000..ac3c702a0 --- /dev/null +++
b/playbooks/common/openshift-cluster/initialize_oo_option_facts.yml @@ -0,0 +1,27 @@ +--- +- name: Set oo_option facts + hosts: oo_all_hosts + tags: + - always + tasks: + - set_fact: + openshift_docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') }}" + when: openshift_docker_additional_registries is not defined + - set_fact: + openshift_docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') }}" + when: openshift_docker_insecure_registries is not defined + - set_fact: + openshift_docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') }}" + when: openshift_docker_blocked_registries is not defined + - set_fact: + openshift_docker_options: "{{ lookup('oo_option', 'docker_options') }}" + when: openshift_docker_options is not defined + - set_fact: + openshift_docker_log_driver: "{{ lookup('oo_option', 'docker_log_driver') }}" + when: openshift_docker_log_driver is not defined + - set_fact: + openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}" + when: openshift_docker_log_options is not defined + - set_fact: + openshift_docker_selinux_enabled: "{{ lookup('oo_option', 'docker_selinux_enabled') }}" + when: openshift_docker_selinux_enabled is not defined diff --git a/playbooks/common/openshift-cluster/initialize_openshift_repos.yml b/playbooks/common/openshift-cluster/initialize_openshift_repos.yml new file mode 100644 index 000000000..a7114fc80 --- /dev/null +++ b/playbooks/common/openshift-cluster/initialize_openshift_repos.yml @@ -0,0 +1,8 @@ +--- +- name: Setup yum repositories for all hosts + hosts: oo_all_hosts + gather_facts: no + tasks: + - name: initialize openshift repos + include_role: + name: openshift_repos diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml index 6b40176e1..7112a6084 100644 --- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml +++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml @@ -1,27 +1,5 @@ --- # NOTE: requires openshift_facts be run -- name: Verify compatible yum/subscription-manager combination - hosts: l_oo_all_hosts - gather_facts: no - tasks: - # See: - # https://bugzilla.redhat.com/show_bug.cgi?id=1395047 - # https://bugzilla.redhat.com/show_bug.cgi?id=1282961 - # https://github.com/openshift/openshift-ansible/issues/1138 - - name: Check for bad combinations of yum and subscription-manager - command: > - {{ repoquery_cmd }} --installed --qf '%{version}' "yum" - register: yum_ver_test - changed_when: false - when: not openshift.common.is_atomic | bool - - fail: - msg: Incompatible versions of yum and subscription-manager found. You may need to update yum and yum-utils. - when: "not openshift.common.is_atomic | bool and 'Plugin \"search-disabled-repos\" requires API 2.7. Supported API is 2.6.' 
in yum_ver_test.stdout" - -- include: disable_excluder.yml - tags: - - always - - name: Determine openshift_version to configure on first master hosts: oo_first_master roles: diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml index 06cda36a5..99a634970 100644 --- a/playbooks/common/openshift-cluster/openshift_hosted.yml +++ b/playbooks/common/openshift-cluster/openshift_hosted.yml @@ -26,7 +26,10 @@ logging_elasticsearch_cluster_size: "{{ openshift_hosted_logging_elasticsearch_cluster_size | default(1) }}" logging_elasticsearch_ops_cluster_size: "{{ openshift_hosted_logging_elasticsearch_ops_cluster_size | default(1) }}" roles: + - role: openshift_default_storage_class + when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce') - role: openshift_hosted + r_openshift_hosted_use_calico: "{{ openshift.common.use_calico | default(false) | bool }}" - role: openshift_metrics when: openshift_hosted_metrics_deploy | default(false) | bool - role: openshift_logging @@ -53,6 +56,8 @@ pre_tasks: - set_fact: openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}" + - set_fact: + openshift_metrics_hawkular_hostname: "{{ g_metrics_hostname | default('hawkular-metrics.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}" tasks: - block: @@ -60,3 +65,9 @@ name: openshift_logging tasks_from: update_master_config when: openshift_hosted_logging_deploy | default(false) | bool + + - block: + - include_role: + name: openshift_metrics + tasks_from: update_master_config + when: openshift_hosted_metrics_deploy | default(false) | bool diff --git a/playbooks/common/openshift-cluster/openshift_logging.yml b/playbooks/common/openshift-cluster/openshift_logging.yml index d96a78c4c..c1a5d83cd 100644 --- a/playbooks/common/openshift-cluster/openshift_logging.yml +++ b/playbooks/common/openshift-cluster/openshift_logging.yml @@ -5,7 +5,7 @@ - openshift_logging - name: Update Master configs - hosts: masters:!oo_first_master + hosts: oo_masters:!oo_first_master tasks: - block: - include_role: diff --git a/playbooks/common/openshift-cluster/openshift_metrics.yml b/playbooks/common/openshift-cluster/openshift_metrics.yml index 9f38ceea6..1dc180c26 100644 --- a/playbooks/common/openshift-cluster/openshift_metrics.yml +++ b/playbooks/common/openshift-cluster/openshift_metrics.yml @@ -3,3 +3,12 @@ hosts: oo_first_master roles: - openshift_metrics + +- name: OpenShift Metrics + hosts: oo_masters:!oo_first_master + serial: 1 + tasks: + - name: Setup the non-first masters configs + include_role: + name: openshift_metrics + tasks_from: update_master_config.yaml diff --git a/playbooks/common/openshift-cluster/openshift_provisioners.yml b/playbooks/common/openshift-cluster/openshift_provisioners.yml new file mode 100644 index 000000000..b1ca6f606 --- /dev/null +++ b/playbooks/common/openshift-cluster/openshift_provisioners.yml @@ -0,0 +1,5 @@ +--- +- name: OpenShift Provisioners + hosts: oo_first_master + roles: + - openshift_provisioners diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml new file mode 100644 index 000000000..6964e8567 --- /dev/null +++ 
b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml @@ -0,0 +1,158 @@ +--- +- name: Check cert expirys + hosts: oo_etcd_to_config:oo_masters_to_config + vars: + openshift_certificate_expiry_show_all: yes + roles: + # Sets 'check_results' per host which contains health status for + # etcd, master and node certificates. We will use 'check_results' + # to determine if any certificates were expired prior to running + # this playbook. Service restarts will be skipped if any + # certificates were previously expired. + - role: openshift_certificate_expiry + +- name: Backup existing etcd CA certificate directories + hosts: oo_etcd_to_config + roles: + - role: etcd_common + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + tasks: + - name: Determine if CA certificate directory exists + stat: + path: "{{ etcd_ca_dir }}" + register: etcd_ca_certs_dir_stat + - name: Backup generated etcd certificates + command: > + tar -czf {{ etcd_conf_dir }}/etcd-ca-certificate-backup-{{ ansible_date_time.epoch }}.tgz + {{ etcd_ca_dir }} + args: + warn: no + when: etcd_ca_certs_dir_stat.stat.exists | bool + - name: Remove CA certificate directory + file: + path: "{{ etcd_ca_dir }}" + state: absent + when: etcd_ca_certs_dir_stat.stat.exists | bool + +- name: Generate new etcd CA + hosts: oo_first_etcd + roles: + - role: openshift_etcd_ca + etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" + etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" + etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" + +- name: Create temp directory for syncing certs + hosts: localhost + connection: local + become: no + gather_facts: no + tasks: + - name: Create local temp directory for syncing certs + local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX + register: g_etcd_mktemp + changed_when: false + +- name: Distribute etcd CA to etcd hosts + hosts: oo_etcd_to_config + vars: + etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" + roles: + - role: etcd_common + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + tasks: + - name: Create a tarball of the etcd ca certs + command: > + tar -czvf {{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz + -C {{ etcd_ca_dir }} . + args: + creates: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz" + warn: no + delegate_to: "{{ etcd_ca_host }}" + run_once: true + - name: Retrieve etcd ca cert tarball + fetch: + src: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz" + dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/" + flat: yes + fail_on_missing: yes + validate_checksum: yes + delegate_to: "{{ etcd_ca_host }}" + run_once: true + - name: Ensure ca directory exists + file: + path: "{{ etcd_ca_dir }}" + state: directory + - name: Unarchive etcd ca cert tarballs + unarchive: + src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/{{ etcd_ca_name }}.tgz" + dest: "{{ etcd_ca_dir }}" + - name: Read current etcd CA + slurp: + src: "{{ etcd_conf_dir }}/ca.crt" + register: g_current_etcd_ca_output + - name: Read new etcd CA + slurp: + src: "{{ etcd_ca_dir }}/ca.crt" + register: g_new_etcd_ca_output + - copy: + content: "{{ (g_new_etcd_ca_output.content|b64decode) + (g_current_etcd_ca_output.content|b64decode) }}" + dest: "{{ item }}/ca.crt" + with_items: + - "{{ etcd_conf_dir }}" + - "{{ etcd_ca_dir }}" + +- include: ../../openshift-etcd/restart.yml + # Do not restart etcd when etcd certificates were previously expired. 
+ when: ('expired' not in (hostvars + | oo_select_keys(groups['etcd']) + | oo_collect('check_results.check_results.etcd') + | oo_collect('health'))) + +- name: Retrieve etcd CA certificate + hosts: oo_first_etcd + roles: + - role: etcd_common + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + tasks: + - name: Retrieve etcd CA certificate + fetch: + src: "{{ etcd_conf_dir }}/ca.crt" + dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/" + flat: yes + fail_on_missing: yes + validate_checksum: yes + +- name: Distribute etcd CA to masters + hosts: oo_masters_to_config + vars: + openshift_ca_host: "{{ groups.oo_first_master.0 }}" + tasks: + - name: Deploy etcd CA + copy: + src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/ca.crt" + dest: "{{ openshift.common.config_base }}/master/master.etcd-ca.crt" + when: groups.oo_etcd_to_config | default([]) | length > 0 + +- name: Delete temporary directory on localhost + hosts: localhost + connection: local + become: no + gather_facts: no + tasks: + - file: + name: "{{ g_etcd_mktemp.stdout }}" + state: absent + changed_when: false + +- include: ../../openshift-master/restart.yml + # Do not restart masters when master certificates were previously expired. + when: ('expired' not in hostvars + | oo_select_keys(groups['oo_masters_to_config']) + | oo_collect('check_results.check_results.ocp_certs') + | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"})) + and + ('expired' not in hostvars + | oo_select_keys(groups['oo_masters_to_config']) + | oo_collect('check_results.check_results.ocp_certs') + | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"})) diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml index 2963a5940..6b5c805e6 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/etcd.yml @@ -3,7 +3,8 @@ hosts: oo_first_etcd any_errors_fatal: true roles: - - etcd_common + - role: etcd_common + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" post_tasks: - name: Determine if generated etcd certificates exist stat: @@ -27,7 +28,8 @@ hosts: oo_etcd_to_config any_errors_fatal: true roles: - - etcd_common + - role: etcd_common + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" post_tasks: - name: Backup etcd certificates command: > @@ -50,6 +52,7 @@ etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" openshift_ca_host: "{{ groups.oo_first_master.0 }}" + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - name: Redeploy etcd client certificates for masters hosts: oo_masters_to_config @@ -63,4 +66,5 @@ etcd_cert_prefix: "master.etcd-" openshift_ca_host: "{{ groups.oo_first_master.0 }}" openshift_master_count: "{{ openshift.master.master_count | default(groups.oo_masters | length) }}" + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml b/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml index c30889d64..51b196299 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml 
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/masters.yml @@ -51,3 +51,13 @@ | oo_collect('openshift.common.hostname') | default(none, true) }}" openshift_certificates_redeploy: true + - role: lib_utils + post_tasks: + - yedit: + src: "{{ openshift.common.config_base }}/master/master-config.yaml" + key: servingInfo.namedCertificates + value: "{{ openshift.master.named_certificates | default([]) | oo_named_certificates_list }}" + when: + - ('named_certificates' in openshift.master) + - openshift.master.named_certificates | default([]) | length > 0 + - openshift_master_overwrite_named_certificates | default(false) | bool diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml index 2af699209..089ae6bbc 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml @@ -6,131 +6,17 @@ msg: "The current OpenShift version is less than 1.2/3.2 and does not support CA bundles." when: not openshift.common.version_gte_3_2_or_1_2 | bool -- name: Backup existing etcd CA certificate directories - hosts: oo_etcd_to_config - roles: - - etcd_common - tasks: - - name: Determine if CA certificate directory exists - stat: - path: "{{ etcd_ca_dir }}" - register: etcd_ca_certs_dir_stat - - name: Backup generated etcd certificates - command: > - tar -czf {{ etcd_conf_dir }}/etcd-ca-certificate-backup-{{ ansible_date_time.epoch }}.tgz - {{ etcd_ca_dir }} - args: - warn: no - when: etcd_ca_certs_dir_stat.stat.exists | bool - - name: Remove CA certificate directory - file: - path: "{{ etcd_ca_dir }}" - state: absent - when: etcd_ca_certs_dir_stat.stat.exists | bool - -- name: Generate new etcd CA - hosts: oo_first_etcd - roles: - - role: etcd_ca - etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" - etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" - -- name: Create temp directory for syncing certs - hosts: localhost - connection: local - become: no - gather_facts: no - tasks: - - name: Create local temp directory for syncing certs - local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX - register: g_etcd_mktemp - changed_when: false - -- name: Distribute etcd CA to etcd hosts - hosts: oo_etcd_to_config +- name: Check cert expirys + hosts: oo_nodes_to_config:oo_masters_to_config vars: - etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" + openshift_certificate_expiry_show_all: yes roles: - - etcd_common - tasks: - - name: Create a tarball of the etcd ca certs - command: > - tar -czvf {{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz - -C {{ etcd_ca_dir }} . 
- args: - creates: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz" - warn: no - delegate_to: "{{ etcd_ca_host }}" - run_once: true - - name: Retrieve etcd ca cert tarball - fetch: - src: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz" - dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/" - flat: yes - fail_on_missing: yes - validate_checksum: yes - delegate_to: "{{ etcd_ca_host }}" - run_once: true - - name: Ensure ca directory exists - file: - path: "{{ etcd_ca_dir }}" - state: directory - - name: Unarchive etcd ca cert tarballs - unarchive: - src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/{{ etcd_ca_name }}.tgz" - dest: "{{ etcd_ca_dir }}" - - name: Read current etcd CA - slurp: - src: "{{ etcd_conf_dir }}/ca.crt" - register: g_current_etcd_ca_output - - name: Read new etcd CA - slurp: - src: "{{ etcd_ca_dir }}/ca.crt" - register: g_new_etcd_ca_output - - copy: - content: "{{ (g_new_etcd_ca_output.content|b64decode) + (g_current_etcd_ca_output.content|b64decode) }}" - dest: "{{ item }}/ca.crt" - with_items: - - "{{ etcd_conf_dir }}" - - "{{ etcd_ca_dir }}" - -- name: Retrieve etcd CA certificate - hosts: oo_first_etcd - roles: - - etcd_common - tasks: - - name: Retrieve etcd CA certificate - fetch: - src: "{{ etcd_conf_dir }}/ca.crt" - dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/" - flat: yes - fail_on_missing: yes - validate_checksum: yes - -- name: Distribute etcd CA to masters - hosts: oo_masters_to_config - vars: - openshift_ca_host: "{{ groups.oo_first_master.0 }}" - tasks: - - name: Deploy CA certificate, key, bundle and serial - copy: - src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/ca.crt" - dest: "{{ openshift.common.config_base }}/master/master.etcd-ca.crt" - when: groups.oo_etcd_to_config | default([]) | length > 0 - -- name: Delete temporary directory on localhost - hosts: localhost - connection: local - become: no - gather_facts: no - tasks: - - file: - name: "{{ g_etcd_mktemp.stdout }}" - state: absent - changed_when: false - -- include: ../../../common/openshift-etcd/restart.yml + # Sets 'check_results' per host which contains health status for + # etcd, master and node certificates. We will use 'check_results' + # to determine if any certificates were expired prior to running + # this playbook. Service restarts will be skipped if any + # certificates were previously expired. + - role: openshift_certificate_expiry # Update master config when ca-bundle not referenced. Services will be # restarted below after new CA certificate has been distributed. @@ -322,7 +208,17 @@ group: "{{ 'root' if item == 'root' else _ansible_ssh_user_gid.stdout }}" with_items: "{{ client_users }}" -- include: ../../../common/openshift-master/restart.yml +- include: ../../openshift-master/restart.yml + # Do not restart masters when master certificates were previously expired. 
+ when: ('expired' not in hostvars + | oo_select_keys(groups['oo_masters_to_config']) + | oo_collect('check_results.check_results.ocp_certs') + | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"})) + and + ('expired' not in hostvars + | oo_select_keys(groups['oo_masters_to_config']) + | oo_collect('check_results.check_results.ocp_certs') + | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"})) - name: Distribute OpenShift CA certificate to nodes hosts: oo_nodes_to_config @@ -371,4 +267,14 @@ state: absent changed_when: false -- include: ../../../common/openshift-node/restart.yml +- include: ../../openshift-node/restart.yml + # Do not restart nodes when node certificates were previously expired. + when: ('expired' not in hostvars + | oo_select_keys(groups['oo_nodes_to_config']) + | oo_collect('check_results.check_results.ocp_certs') + | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/server.crt"})) + and + ('expired' not in hostvars + | oo_select_keys(groups['oo_nodes_to_config']) + | oo_collect('check_results.check_results.ocp_certs') + | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/ca.crt"})) diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml index 6771cc98d..afd5463b2 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/registry.yml @@ -48,10 +48,6 @@ # Replace dc/docker-registry certificate secret contents if set. 
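All of the conditional restart includes above use the same gate: the openshift_certificate_expiry role records per-host check_results, and the restart is skipped when any inspected certificate already reads 'expired'. Reduced to its core for the master case (oo_select_keys and oo_collect are the repository's own filter plugins; the real plays additionally filter by certificate path):

# Reduced sketch of the expiry gate used by the restart includes above.
- include: ../../openshift-master/restart.yml
  when: >
    'expired' not in (hostvars
                      | oo_select_keys(groups['oo_masters_to_config'])
                      | oo_collect('check_results.check_results.ocp_certs')
                      | oo_collect('health'))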
- block: - - name: Load lib_openshift modules - include_role: - name: lib_openshift - - name: Retrieve registry service IP oc_service: namespace: default @@ -70,9 +66,13 @@ --signer-cert={{ openshift.common.config_base }}/master/ca.crt --signer-key={{ openshift.common.config_base }}/master/ca.key --signer-serial={{ openshift.common.config_base }}/master/ca.serial.txt - --hostnames="{{ docker_registry_service_ip.results.clusterip }},docker-registry.default.svc.cluster.local,{{ docker_registry_route_hostname }}" + --config={{ mktemp.stdout }}/admin.kubeconfig + --hostnames="{{ docker_registry_service_ip.results.clusterip }},docker-registry.default.svc,docker-registry.default.svc.cluster.local,{{ docker_registry_route_hostname }}" --cert={{ openshift.common.config_base }}/master/registry.crt --key={{ openshift.common.config_base }}/master/registry.key + {% if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool %} + --expire-days={{ openshift_hosted_registry_cert_expire_days | default(730) }} + {% endif %} - name: Update registry certificates secret oc_secret: diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml index 35eedd5ee..748bbbf91 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/router.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/router.yml @@ -51,7 +51,7 @@ name: router-certs namespace: default state: absent - run_once: true + run_once: true - name: Remove router service annotations command: > @@ -67,7 +67,67 @@ service.alpha.openshift.io/serving-cert-secret-name=router-certs --config={{ mktemp.stdout }}/admin.kubeconfig -n default - when: l_router_dc.rc == 0 and 'router-certs' in router_secrets + when: l_router_dc.rc == 0 and 'router-certs' in router_secrets and openshift_hosted_router_certificate is undefined + + - block: + - assert: + that: + - "'certfile' in openshift_hosted_router_certificate" + - "'keyfile' in openshift_hosted_router_certificate" + - "'cafile' in openshift_hosted_router_certificate" + msg: |- + openshift_hosted_router_certificate has been set in the inventory but is + missing one or more required keys. Ensure that 'certfile', 'keyfile', + and 'cafile' keys have been specified for the openshift_hosted_router_certificate + inventory variable. + + - name: Read router certificate and key + become: no + local_action: + module: slurp + src: "{{ item }}" + register: openshift_router_certificate_output + # Defaulting dictionary keys to none to avoid deprecation warnings + # (future fatal errors) during template evaluation. Dictionary keys + # won't be accessed unless openshift_hosted_router_certificate is + # defined and has all keys (certfile, keyfile, cafile) which we + # check above. 
+ with_items: + - "{{ (openshift_hosted_router_certificate | default({'certfile':none})).certfile }}" + - "{{ (openshift_hosted_router_certificate | default({'keyfile':none})).keyfile }}" + - "{{ (openshift_hosted_router_certificate | default({'cafile':none})).cafile }}" + + - name: Write temporary router certificate file + copy: + content: "{% for certificate in openshift_router_certificate_output.results -%}{{ certificate.content | b64decode }}{% endfor -%}" + dest: "{{ mktemp.stdout }}/openshift-hosted-router-certificate.pem" + mode: 0600 + + - name: Write temporary router key file + copy: + content: "{{ (openshift_router_certificate_output.results + | oo_collect('content', {'source':(openshift_hosted_router_certificate | default({'keyfile':none})).keyfile}))[0] | b64decode }}" + dest: "{{ mktemp.stdout }}/openshift-hosted-router-certificate.key" + mode: 0600 + + - name: Replace router-certs secret + shell: > + {{ openshift.common.client_binary }} secrets new router-certs + tls.crt="{{ mktemp.stdout }}/openshift-hosted-router-certificate.pem" + tls.key="{{ mktemp.stdout }}/openshift-hosted-router-certificate.key" + --type=kubernetes.io/tls + --config={{ mktemp.stdout }}/admin.kubeconfig + --confirm + -o json | {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig replace -f - + + - name: Remove temporary router certificate and key files + file: + path: "{{ item }}" + state: absent + with_items: + - "{{ mktemp.stdout }}/openshift-hosted-router-certificate.pem" + - "{{ mktemp.stdout }}/openshift-hosted-router-certificate.key" + when: l_router_dc.rc == 0 and 'router-certs' in router_secrets and openshift_hosted_router_certificate is defined - name: Redeploy router command: > diff --git a/playbooks/common/openshift-cluster/reset_excluder.yml b/playbooks/common/openshift-cluster/reset_excluder.yml deleted file mode 100644 index fe86f4c23..000000000 --- a/playbooks/common/openshift-cluster/reset_excluder.yml +++ /dev/null @@ -1,8 +0,0 @@ ---- -- name: Re-enable excluder if it was previously enabled - hosts: l_oo_all_hosts - gather_facts: no - tasks: - - include_role: - name: openshift_excluder - tasks_from: reset diff --git a/playbooks/common/openshift-cluster/service_catalog.yml b/playbooks/common/openshift-cluster/service_catalog.yml new file mode 100644 index 000000000..599350258 --- /dev/null +++ b/playbooks/common/openshift-cluster/service_catalog.yml @@ -0,0 +1,20 @@ +--- + +- name: Update Master configs + hosts: oo_masters + serial: 1 + tasks: + - block: + - include_role: + name: openshift_service_catalog + tasks_from: wire_aggregator + vars: + first_master: "{{ groups.oo_first_master[0] }}" + +- name: Service Catalog + hosts: oo_first_master + roles: + - openshift_service_catalog + - ansible_service_broker + vars: + first_master: "{{ groups.oo_first_master[0] }}" diff --git a/playbooks/common/openshift-cluster/std_include.yml b/playbooks/common/openshift-cluster/std_include.yml index 078991b12..6cc56889a 100644 --- a/playbooks/common/openshift-cluster/std_include.yml +++ b/playbooks/common/openshift-cluster/std_include.yml @@ -1,30 +1,4 @@ --- -- name: Create initial host groups for localhost - hosts: localhost - connection: local - become: no - gather_facts: no - tags: - - always - tasks: - - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml - - name: Evaluate group l_oo_all_hosts - add_host: - name: "{{ item }}" - groups: l_oo_all_hosts - with_items: "{{ g_all_hosts | default([]) }}" - changed_when: no - -- name: Create initial host groups for 
all hosts - hosts: l_oo_all_hosts - gather_facts: no - tags: - - always - tasks: - - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml - - set_fact: - openshift_deployment_type: "{{ deployment_type }}" - - include: evaluate_groups.yml tags: - always @@ -37,6 +11,10 @@ tags: - node +- include: initialize_openshift_repos.yml + tags: + - always + - include: initialize_openshift_version.yml tags: - always diff --git a/playbooks/common/openshift-cluster/update_repos_and_packages.yml b/playbooks/common/openshift-cluster/update_repos_and_packages.yml deleted file mode 100644 index b83e4d821..000000000 --- a/playbooks/common/openshift-cluster/update_repos_and_packages.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -- include: evaluate_groups.yml - -- name: Subscribe hosts, update repos and update OS packages - hosts: oo_hosts_to_update - vars: - openshift_deployment_type: "{{ deployment_type }}" - roles: - # Explicitly calling openshift_facts because it appears that when - # rhel_subscribe is skipped that the openshift_facts dependency for - # openshift_repos is also skipped (this is the case at least for Ansible - # 2.0.2) - - openshift_facts - - role: rhel_subscribe - when: deployment_type in ["enterprise", "atomic-enterprise", "openshift-enterprise"] and - ansible_distribution == "RedHat" and - lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) | - default('no', True) | lower in ['no', 'false'] - - openshift_repos - - os_update_latest diff --git a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml deleted file mode 100644 index 9f7961614..000000000 --- a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml +++ /dev/null @@ -1,14 +0,0 @@ ---- -# This is a hack to allow us to use systemd_units.yml, but skip the handlers which -# restart services. We will unconditionally restart all containerized services -# because we have to unconditionally restart Docker: -- set_fact: - skip_node_svc_handlers: True - -- name: Update systemd units - include: ../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version={{ openshift_image_tag }} - -# This is a no-op because of skip_node_svc_handlers, but lets us trigger it before end of -# play when the node has already been marked schedulable again. 
(this would look strange -# in logs otherwise) -- meta: flush_handlers diff --git a/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml new file mode 100644 index 000000000..800621857 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/disable_master_excluders.yml @@ -0,0 +1,12 @@ +--- +- name: Disable excluders + hosts: oo_masters_to_config + gather_facts: no + roles: + - role: openshift_excluder + r_openshift_excluder_action: disable + r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" + r_openshift_excluder_verify_upgrade: true + r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}" + r_openshift_excluder_package_state: latest + r_openshift_excluder_docker_package_state: latest diff --git a/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml new file mode 100644 index 000000000..a66301c0d --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml @@ -0,0 +1,12 @@ +--- +- name: Disable excluders + hosts: oo_nodes_to_upgrade:!oo_masters_to_config + gather_facts: no + roles: + - role: openshift_excluder + r_openshift_excluder_action: disable + r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" + r_openshift_excluder_verify_upgrade: true + r_openshift_excluder_upgrade_target: "{{ openshift_upgrade_target }}" + r_openshift_excluder_package_state: latest + r_openshift_excluder_docker_package_state: latest diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml index 4ee6afe2a..7cc13137f 100644 --- a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/docker/docker_upgrade.yml @@ -1,4 +1,13 @@ --- +- include: ../../evaluate_groups.yml + vars: + # Do not allow adding hosts during upgrade. + g_new_master_hosts: [] + g_new_node_hosts: [] + openshift_cluster_id: "{{ cluster_id | default('default') }}" + +- include: ../initialize_nodes_to_upgrade.yml + - name: Check for appropriate Docker versions hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config roles: @@ -11,7 +20,7 @@ msg: Cannot upgrade Docker on Atomic operating systems. 
when: openshift.common.is_atomic | bool - - include: ../../../../common/openshift-cluster/upgrades/docker/upgrade_check.yml + - include: upgrade_check.yml when: docker_upgrade is not defined or docker_upgrade | bool @@ -28,7 +37,7 @@ tasks: - name: Mark node unschedulable - oadm_manage_node: + oc_adm_manage_node: node: "{{ openshift.node.nodename | lower }}" schedulable: False delegate_to: "{{ groups.oo_first_master.0 }}" @@ -43,15 +52,19 @@ - name: Drain Node for Kubelet upgrade command: > - {{ openshift.common.admin_binary }} drain {{ openshift.node.nodename }} --force --delete-local-data --ignore-daemonsets + {{ openshift.common.admin_binary }} drain {{ openshift.node.nodename }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets delegate_to: "{{ groups.oo_first_master.0 }}" when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade + register: l_docker_upgrade_drain_result + until: not l_docker_upgrade_drain_result | failed + retries: 60 + delay: 60 - - include: ../../../../common/openshift-cluster/upgrades/docker/upgrade.yml + - include: tasks/upgrade.yml when: l_docker_upgrade is defined and l_docker_upgrade | bool - name: Set node schedulability - oadm_manage_node: + oc_adm_manage_node: node: "{{ openshift.node.nodename | lower }}" schedulable: True delegate_to: "{{ groups.oo_first_master.0 }}" diff --git a/playbooks/common/openshift-cluster/upgrades/files/nuke_images.sh b/playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh index 8635eab0d..8635eab0d 100644 --- a/playbooks/common/openshift-cluster/upgrades/files/nuke_images.sh +++ b/playbooks/common/openshift-cluster/upgrades/docker/nuke_images.sh diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/roles b/playbooks/common/openshift-cluster/upgrades/docker/roles index 6bc1a7aef..6bc1a7aef 120000 --- a/playbooks/byo/openshift-cluster/upgrades/docker/roles +++ b/playbooks/common/openshift-cluster/upgrades/docker/roles diff --git a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml index 1b418920f..83f16ac0d 100644 --- a/playbooks/common/openshift-cluster/upgrades/docker/restart.yml +++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/restart.yml @@ -1,6 +1,10 @@ --- - name: Restart docker service: name=docker state=restarted + register: l_docker_restart_docker_in_upgrade_result + until: not l_docker_restart_docker_in_upgrade_result | failed + retries: 3 + delay: 30 - name: Update docker facts openshift_facts: @@ -11,7 +15,6 @@ with_items: - etcd_container - openvswitch - - "{{ openshift.common.service_type }}-master" - "{{ openshift.common.service_type }}-master-api" - "{{ openshift.common.service_type }}-master-controllers" - "{{ openshift.common.service_type }}-node" @@ -24,4 +27,5 @@ state: started delay: 10 port: "{{ openshift.master.api_port }}" + timeout: 600 when: inventory_hostname in groups.oo_masters_to_config diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml index 17f8fc6e9..808cc562c 100644 --- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/docker/tasks/upgrade.yml @@ -4,7 +4,6 @@ - name: Stop containerized services service: name={{ item }} state=stopped with_items: - - "{{ openshift.common.service_type }}-master" - "{{ 
openshift.common.service_type }}-master-api" - "{{ openshift.common.service_type }}-master-controllers" - "{{ openshift.common.service_type }}-node" @@ -32,7 +31,13 @@ - debug: var=docker_image_count.stdout when: docker_upgrade_nuke_images is defined and docker_upgrade_nuke_images | bool -- service: name=docker state=stopped +- service: + name: docker + state: stopped + register: l_pb_docker_upgrade_stop_result + until: not l_pb_docker_upgrade_stop_result | failed + retries: 3 + delay: 30 - name: Upgrade Docker package: name=docker{{ '-' + docker_version }} state=present diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml index 7ef79afa9..616ba04f8 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml @@ -1,88 +1,14 @@ --- - name: Backup etcd - hosts: etcd_hosts_to_backup - vars: - embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" - timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}" - etcdctl_command: "{{ 'etcdctl' if not openshift.common.is_containerized or embedded_etcd else 'docker exec etcd_container etcdctl' if not openshift.common.is_etcd_system_container else 'runc exec etcd etcdctl' }}" + hosts: oo_etcd_hosts_to_backup roles: - - openshift_facts - tasks: - # Ensure we persist the etcd role for this host in openshift_facts - - openshift_facts: - role: etcd - local_facts: {} - when: "'etcd' not in openshift" - - - stat: path=/var/lib/openshift - register: var_lib_openshift - - - stat: path=/var/lib/origin - register: var_lib_origin - - - name: Create origin symlink if necessary - file: src=/var/lib/openshift/ dest=/var/lib/origin state=link - when: var_lib_openshift.stat.exists == True and var_lib_origin.stat.exists == False - - # TODO: replace shell module with command and update later checks - # We assume to be using the data dir for all backups. - - name: Check available disk space for etcd backup - shell: df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1 - register: avail_disk - # AUDIT:changed_when: `false` because we are only inspecting - # state, not manipulating anything - changed_when: false - - # TODO: replace shell module with command and update later checks - - name: Check current embedded etcd disk usage - shell: du -k {{ openshift.etcd.etcd_data_dir }} | tail -n 1 | cut -f1 - register: etcd_disk_usage - when: embedded_etcd | bool - # AUDIT:changed_when: `false` because we are only inspecting - # state, not manipulating anything - changed_when: false - - - name: Abort if insufficient disk space for etcd backup - fail: - msg: > - {{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup, - {{ avail_disk.stdout }} Kb available. - when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int) - - # For non containerized and non embedded we should have the correct version of - # etcd installed already. So don't do anything. - # - # For embedded or containerized we need to use the latest because OCP 3.3 uses - # a version of etcd that can only be backed up with etcd-3.x and if it's - # containerized then etcd version may be newer than that on the host so - # upgrade it. - # - # On atomic we have neither yum nor dnf so ansible throws a hard to debug error - # if you use package there, like this: "Could not find a module for unknown." 
- # see https://bugzilla.redhat.com/show_bug.cgi?id=1408668 - # - # TODO - We should refactor all containerized backups to use the containerized - # version of etcd to perform the backup rather than relying on the host's - # binaries. Until we do that we'll continue to have problems backing up etcd - # when atomic host has an older version than the version that's running in the - # container whether that's embedded or not - - name: Install latest etcd for containerized or embedded - package: - name: etcd - state: latest - when: ( embedded_etcd | bool or openshift.common.is_containerized ) and not openshift.common.is_atomic - - - name: Generate etcd backup - command: > - {{ etcdctl_command }} backup --data-dir={{ openshift.etcd.etcd_data_dir }} - --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ backup_tag | default('') }}{{ timestamp }} - - - set_fact: - etcd_backup_complete: True - - - name: Display location of etcd backup - debug: - msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ backup_tag | default('') }}{{ timestamp }}" + - role: openshift_facts + - role: etcd_common + r_etcd_common_action: backup + r_etcd_common_backup_tag: etcd_backup_tag + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" + r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}" - name: Gate on etcd backup hosts: localhost @@ -91,10 +17,10 @@ tasks: - set_fact: etcd_backup_completed: "{{ hostvars - | oo_select_keys(groups.etcd_hosts_to_backup) - | oo_collect('inventory_hostname', {'etcd_backup_complete': true}) }}" + | oo_select_keys(groups.oo_etcd_hosts_to_backup) + | oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}" - set_fact: - etcd_backup_failed: "{{ groups.etcd_hosts_to_backup | difference(etcd_backup_completed) }}" + etcd_backup_failed: "{{ groups.oo_etcd_hosts_to_backup | difference(etcd_backup_completed) }}" - fail: msg: "Upgrade cannot continue. The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}" when: etcd_backup_failed | length > 0 diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml deleted file mode 100644 index 5f8b59e17..000000000 --- a/playbooks/common/openshift-cluster/upgrades/etcd/containerized_tasks.yml +++ /dev/null @@ -1,46 +0,0 @@ ---- -- name: Verify cluster is healthy pre-upgrade - command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health" - -- name: Get current image - shell: grep 'ExecStart=' /etc/systemd/system/etcd_container.service | awk '{print $NF}' - register: current_image - -- name: Set new_etcd_image - set_fact: - new_etcd_image: "{{ current_image.stdout | regex_replace('/etcd.*$','/etcd:' ~ upgrade_version ) }}" - -- name: Pull new etcd image - command: "docker pull {{ new_etcd_image }}" - -- name: Update to latest etcd image - replace: - dest: /etc/systemd/system/etcd_container.service - regexp: "{{ current_image.stdout }}$" - replace: "{{ new_etcd_image }}" - -- name: Restart etcd_container - systemd: - name: etcd_container - daemon_reload: yes - state: restarted - -## TODO: probably should just move this into the backup playbooks, also this -## will fail on atomic host. 
We need to revisit how to do etcd backups there as -## the container may be newer than etcdctl on the host. Assumes etcd3 obsoletes etcd (7.3.1) -- name: Upgrade etcd for etcdctl when not atomic - package: name=etcd state=latest - when: not openshift.common.is_atomic | bool - -- name: Verify cluster is healthy - command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health" - register: etcdctl - until: etcdctl.rc == 0 - retries: 3 - delay: 10 - -- name: Store new etcd_image - openshift_facts: - role: etcd - local_facts: - etcd_image: "{{ new_etcd_image }}" diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml deleted file mode 100644 index 30232110e..000000000 --- a/playbooks/common/openshift-cluster/upgrades/etcd/fedora_tasks.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -# F23 GA'd with etcd 2.0, currently has 2.2 in updates -# F24 GA'd with etcd-2.2, currently has 2.2 in updates -# F25 Beta currently has etcd 3.0 -- name: Verify cluster is healthy pre-upgrade - command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health" - -- name: Update etcd - package: - name: "etcd" - state: "latest" - -- name: Restart etcd - service: - name: etcd - state: restarted - -- name: Verify cluster is healthy - command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health" - register: etcdctl - until: etcdctl.rc == 0 - retries: 3 - delay: 10 diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh b/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh deleted file mode 120000 index 641e04e44..000000000 --- a/playbooks/common/openshift-cluster/upgrades/etcd/files/etcdctl.sh +++ /dev/null @@ -1 +0,0 @@ -../roles/etcd/files/etcdctl.sh
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml index fa86d29fb..64abc54e7 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml @@ -5,42 +5,19 @@ # mirrored packages on your own because only the GA and latest versions are # available in the repos. So for Fedora we'll simply skip this, sorry. -- include: ../../evaluate_groups.yml - tags: - - always - -# We use two groups one for hosts we're upgrading which doesn't include embedded etcd -# The other for backing up which includes the embedded etcd host, there's no need to -# upgrade embedded etcd that just happens when the master is updated. -- name: Evaluate additional groups for etcd - hosts: localhost - connection: local - become: no - tasks: - - name: Evaluate etcd_hosts_to_upgrade - add_host: - name: "{{ item }}" - groups: etcd_hosts_to_upgrade - with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else [] }}" - changed_when: False - - - name: Evaluate etcd_hosts_to_backup - add_host: - name: "{{ item }}" - groups: etcd_hosts_to_backup - with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}" - changed_when: False - - name: Backup etcd before upgrading anything include: backup.yml vars: - backup_tag: "pre-upgrade-" + etcd_backup_tag: "pre-upgrade-" when: openshift_etcd_backup | default(true) | bool - name: Drop etcdctl profiles - hosts: etcd_hosts_to_upgrade + hosts: oo_etcd_hosts_to_upgrade tasks: - - include: roles/etcd/tasks/etcdctl.yml + - include_role: + name: etcd_common + vars: + r_etcd_common_action: drop_etcdctl - name: Perform etcd upgrade include: ./upgrade.yml diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml b/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml deleted file mode 100644 index 3a972e8ab..000000000 --- a/playbooks/common/openshift-cluster/upgrades/etcd/rhel_tasks.yml +++ /dev/null @@ -1,20 +0,0 @@ ---- -- name: Verify cluster is healthy pre-upgrade - command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health" - -- name: Update etcd RPM - package: - name: etcd-{{ upgrade_version }}* - state: latest - -- name: Restart etcd - service: - name: etcd - state: restarted - -- name: Verify cluster is healthy - command: "etcdctl --cert-file /etc/etcd/peer.crt --key-file /etc/etcd/peer.key --ca-file /etc/etcd/ca.crt -C https://{{ openshift.common.hostname }}:2379 cluster-health" - register: etcdctl - until: etcdctl.rc == 0 - retries: 3 - delay: 10 diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml index a9b5b94e6..39e82498d 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade.yml @@ -1,119 +1,110 @@ --- - name: Determine etcd version - hosts: etcd_hosts_to_upgrade + hosts: oo_etcd_hosts_to_upgrade tasks: - - name: Record RPM based etcd version - command: rpm -qa --qf '%{version}' etcd\* - args: - warn: no - register: etcd_rpm_version - failed_when: false - when: not openshift.common.is_containerized | bool - # AUDIT:changed_when: `false` because we are 
only inspecting - # state, not manipulating anything - changed_when: false - - - name: Record containerized etcd version - command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\* - register: etcd_container_version - failed_when: false - when: openshift.common.is_containerized | bool - # AUDIT:changed_when: `false` because we are only inspecting - # state, not manipulating anything - changed_when: false - - - name: Record containerized etcd version - command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\* - register: etcd_container_version - failed_when: false - when: openshift.common.is_containerized | bool and not openshift.common.is_etcd_system_container | bool - # AUDIT:changed_when: `false` because we are only inspecting - # state, not manipulating anything - changed_when: false - - - name: Record containerized etcd version - command: runc exec etcd_container rpm -qa --qf '%{version}' etcd\* - register: etcd_container_version - failed_when: false - when: openshift.common.is_containerized | bool and openshift.common.is_etcd_system_container | bool - # AUDIT:changed_when: `false` because we are only inspecting - # state, not manipulating anything - changed_when: false - -# I really dislike this copy/pasta but I wasn't able to find a way to get it to loop -# through hosts, then loop through tasks only when appropriate -- name: Upgrade to 2.1 - hosts: etcd_hosts_to_upgrade - serial: 1 + - block: + - name: Record RPM based etcd version + command: rpm -qa --qf '%{version}' etcd\* + args: + warn: no + register: etcd_rpm_version + failed_when: false + # AUDIT:changed_when: `false` because we are only inspecting + # state, not manipulating anything + changed_when: false + - debug: + msg: "Etcd rpm version {{ etcd_rpm_version.stdout }} detected" + when: + - not openshift.common.is_containerized | bool + + - block: + - name: Record containerized etcd version (docker) + command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\* + register: etcd_container_version_docker + failed_when: false + # AUDIT:changed_when: `false` because we are only inspecting + # state, not manipulating anything + changed_when: false + when: + - not openshift.common.is_etcd_system_container | bool + + # Since a registered variable is set even if the when condition + # is false, we need to set etcd_container_version separately + - set_fact: + etcd_container_version: "{{ etcd_container_version_docker.stdout }}" + when: + - not openshift.common.is_etcd_system_container | bool + + - name: Record containerized etcd version (runc) + command: runc exec etcd rpm -qa --qf '%{version}' etcd\* + register: etcd_container_version_runc + failed_when: false + # AUDIT:changed_when: `false` because we are only inspecting + # state, not manipulating anything + changed_when: false + when: + - openshift.common.is_etcd_system_container | bool + + # Since a registered variable is set even if the when condition + # is false, we need to set etcd_container_version separately + - set_fact: + etcd_container_version: "{{ etcd_container_version_runc.stdout }}" + when: + - openshift.common.is_etcd_system_container | bool + + - debug: + msg: "Etcd containerized version {{ etcd_container_version }} detected" + when: + - openshift.common.is_containerized | bool + +- include: upgrade_rpm_members.yml vars: - upgrade_version: '2.1' - tasks: - - include: rhel_tasks.yml - when: etcd_rpm_version.stdout | default('99') | version_compare('2.1','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized
| bool + etcd_upgrade_version: '2.1' -- name: Upgrade RPM hosts to 2.2 - hosts: etcd_hosts_to_upgrade - serial: 1 +- include: upgrade_rpm_members.yml vars: - upgrade_version: '2.2' - tasks: - - include: rhel_tasks.yml - when: etcd_rpm_version.stdout | default('99') | version_compare('2.2','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool + etcd_upgrade_version: '2.2' -- name: Upgrade containerized hosts to 2.2.5 - hosts: etcd_hosts_to_upgrade - serial: 1 +- include: upgrade_image_members.yml vars: - upgrade_version: 2.2.5 - tasks: - - include: containerized_tasks.yml - when: etcd_container_version.stdout | default('99') | version_compare('2.2','<') and openshift.common.is_containerized | bool + etcd_upgrade_version: '2.2.5' -- name: Upgrade RPM hosts to 2.3 - hosts: etcd_hosts_to_upgrade - serial: 1 +- include: upgrade_rpm_members.yml vars: - upgrade_version: '2.3' - tasks: - - include: rhel_tasks.yml - when: etcd_rpm_version.stdout | default('99') | version_compare('2.3','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool + etcd_upgrade_version: '2.3' -- name: Upgrade containerized hosts to 2.3.7 - hosts: etcd_hosts_to_upgrade - serial: 1 +- include: upgrade_image_members.yml vars: - upgrade_version: 2.3.7 - tasks: - - include: containerized_tasks.yml - when: etcd_container_version.stdout | default('99') | version_compare('2.3','<') and openshift.common.is_containerized | bool + etcd_upgrade_version: '2.3.7' -- name: Upgrade RPM hosts to 3.0 - hosts: etcd_hosts_to_upgrade - serial: 1 +- include: upgrade_rpm_members.yml vars: - upgrade_version: '3.0' - tasks: - - include: rhel_tasks.yml - when: etcd_rpm_version.stdout | default('99') | version_compare('3.0','<') and ansible_distribution == 'RedHat' and not openshift.common.is_containerized | bool + etcd_upgrade_version: '3.0' -- name: Upgrade containerized hosts to etcd3 image - hosts: etcd_hosts_to_upgrade - serial: 1 +- include: upgrade_image_members.yml vars: - upgrade_version: 3.0.15 - tasks: - - include: containerized_tasks.yml - when: etcd_container_version.stdout | default('99') | version_compare('3.0','<') and openshift.common.is_containerized | bool + etcd_upgrade_version: '3.0.15' + +- include: upgrade_rpm_members.yml + vars: + etcd_upgrade_version: '3.1' + +- include: upgrade_image_members.yml + vars: + etcd_upgrade_version: '3.1.3' - name: Upgrade fedora to latest - hosts: etcd_hosts_to_upgrade + hosts: oo_etcd_hosts_to_upgrade serial: 1 tasks: - - include: fedora_tasks.yml - when: ansible_distribution == 'Fedora' and not openshift.common.is_containerized | bool + - include_role: + name: etcd_upgrade + when: + - ansible_distribution == 'Fedora' + - not openshift.common.is_containerized | bool - name: Backup etcd include: backup.yml vars: - backup_tag: "post-3.0-" + etcd_backup_tag: "post-3.0-" when: openshift_etcd_backup | default(true) | bool diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml new file mode 100644 index 000000000..831ca8f57 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_image_members.yml @@ -0,0 +1,17 @@ +--- +# INPUT etcd_upgrade_version +# INPUT etcd_container_version +# INPUT openshift.common.is_containerized +- name: Upgrade containerized hosts to {{ etcd_upgrade_version }} + hosts: oo_etcd_hosts_to_upgrade + serial: 1 + roles: + - role: etcd_upgrade + r_etcd_upgrade_action: upgrade + 
r_etcd_upgrade_mechanism: image + r_etcd_upgrade_version: "{{ etcd_upgrade_version }}" + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + etcd_peer: "{{ openshift.common.hostname }}" + when: + - etcd_container_version | default('99') | version_compare(etcd_upgrade_version,'<') + - openshift.common.is_containerized | bool diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml new file mode 100644 index 000000000..2e79451e0 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/etcd/upgrade_rpm_members.yml @@ -0,0 +1,18 @@ +--- +# INPUT etcd_upgrade_version +# INPUT etcd_rpm_version +# INPUT openshift.common.is_containerized +- name: Upgrade to {{ etcd_upgrade_version }} + hosts: oo_etcd_hosts_to_upgrade + serial: 1 + roles: + - role: etcd_upgrade + r_etcd_upgrade_action: upgrade + r_etcd_upgrade_mechanism: rpm + r_etcd_upgrade_version: "{{ etcd_upgrade_version }}" + r_etcd_common_etcd_runtime: "host" + etcd_peer: "{{ openshift.common.hostname }}" + when: + - etcd_rpm_version.stdout | default('99') | version_compare(etcd_upgrade_version, '<') + - ansible_distribution == 'RedHat' + - not openshift.common.is_containerized | bool diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml index a3b8c489e..0f421928b 100644 --- a/playbooks/common/openshift-cluster/upgrades/init.yml +++ b/playbooks/common/openshift-cluster/upgrades/init.yml @@ -1,78 +1,22 @@ --- -- name: Create initial host groups for localhost - hosts: localhost - connection: local - become: no - gather_facts: no - tags: - - always - tasks: - - include_vars: ../../../byo/openshift-cluster/cluster_hosts.yml - - name: Evaluate group l_oo_all_hosts - add_host: - name: "{{ item }}" - groups: l_oo_all_hosts - with_items: "{{ g_all_hosts | default([]) }}" - changed_when: False - -- name: Create initial host groups for all hosts - hosts: l_oo_all_hosts - gather_facts: no - tags: - - always - tasks: - - include_vars: ../../../byo/openshift-cluster/cluster_hosts.yml - - include: ../evaluate_groups.yml vars: # Do not allow adding hosts during upgrade. 
g_new_master_hosts: [] g_new_node_hosts: [] openshift_cluster_id: "{{ cluster_id | default('default') }}" - openshift_deployment_type: "{{ deployment_type }}" -- name: Set oo_options - hosts: oo_all_hosts - tasks: - - set_fact: - openshift_docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') }}" - when: openshift_docker_additional_registries is not defined - - set_fact: - openshift_docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') }}" - when: openshift_docker_insecure_registries is not defined - - set_fact: - openshift_docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') }}" - when: openshift_docker_blocked_registries is not defined - - set_fact: - openshift_docker_options: "{{ lookup('oo_option', 'docker_options') }}" - when: openshift_docker_options is not defined - - set_fact: - openshift_docker_log_driver: "{{ lookup('oo_option', 'docker_log_driver') }}" - when: openshift_docker_log_driver is not defined - - set_fact: - openshift_docker_log_options: "{{ lookup('oo_option', 'docker_log_options') }}" - when: openshift_docker_log_options is not defined +- include: ../initialize_oo_option_facts.yml - include: ../initialize_facts.yml -- name: Ensure clean repo cache in the event repos have been changed manually - hosts: oo_all_hosts - tags: - - pre_upgrade - tasks: - - name: Clean package cache - command: "{{ ansible_pkg_mgr }} clean all" - when: not openshift.common.is_atomic | bool - args: - warn: no - - name: Ensure firewall is not switched during upgrade hosts: oo_all_hosts tasks: - name: Check if iptables is running command: systemctl status iptables - ignore_errors: true changed_when: false + failed_when: false register: service_iptables_status - name: Set fact os_firewall_use_firewalld FALSE for iptables diff --git a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml index 046535680..72de63070 100644 --- a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml @@ -6,27 +6,32 @@ - lib_openshift tasks: - - name: Retrieve list of openshift nodes matching upgrade label - oc_obj: - state: list - kind: node - selector: "{{ openshift_upgrade_nodes_label }}" - register: nodes_to_upgrade - when: openshift_upgrade_nodes_label is defined + - when: openshift_upgrade_nodes_label is defined + block: + - name: Retrieve list of openshift nodes matching upgrade label + oc_obj: + state: list + kind: node + selector: "{{ openshift_upgrade_nodes_label }}" + register: nodes_to_upgrade - # We got a list of nodes with the label, now we need to match these with inventory hosts - # using their openshift.common.hostname fact. 
- - name: Map labelled nodes to inventory hosts - add_host: - name: "{{ item }}" - groups: temp_nodes_to_upgrade - ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" - ansible_become: "{{ g_sudo | default(omit) }}" - with_items: " {{ groups['oo_nodes_to_config'] }}" - when: - - openshift_upgrade_nodes_label is defined - - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list - changed_when: false + - name: Fail if no nodes match openshift_upgrade_nodes_label + fail: + msg: "openshift_upgrade_nodes_label was specified but no nodes matched" + when: nodes_to_upgrade.results.results[0]['items'] | length == 0 + + # We got a list of nodes with the label, now we need to match these with inventory hosts + # using their openshift.common.hostname fact. + - name: Map labelled nodes to inventory hosts + add_host: + name: "{{ item }}" + groups: temp_nodes_to_upgrade + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_become: "{{ g_sudo | default(omit) }}" + with_items: " {{ groups['oo_nodes_to_config'] }}" + when: + - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list + changed_when: false # Build up the oo_nodes_to_upgrade group, use the list filtered by label if # present, otherwise hit all nodes: diff --git a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py index 673f11889..4eac8b067 100755 --- a/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py +++ b/playbooks/common/openshift-cluster/upgrades/library/openshift_upgrade_config.py @@ -1,7 +1,5 @@ #!/usr/bin/python # -*- coding: utf-8 -*- -# vim: expandtab:tabstop=4:shiftwidth=4 - """Ansible module for modifying OpenShift configs during an upgrade""" import os diff --git a/playbooks/common/openshift-cluster/upgrades/master_docker b/playbooks/common/openshift-cluster/upgrades/master_docker deleted file mode 120000 index 6aeca2842..000000000 --- a/playbooks/common/openshift-cluster/upgrades/master_docker +++ /dev/null @@ -1 +0,0 @@ -../../../../roles/openshift_master/templates/master_docker
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml index 6f096f705..d9ddf3860 100644 --- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml @@ -5,10 +5,12 @@ - name: Upgrade default router and default registry hosts: oo_first_master vars: - openshift_deployment_type: "{{ deployment_type }}" - registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', openshift_image_tag ) }}" - router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', openshift_image_tag ) }}" - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig" + registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | + replace ( '${version}', openshift_image_tag ) }}" + router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | + replace ( '${version}', openshift_image_tag ) }}" + registry_console_image: "{{ openshift.master.registry_url | regex_replace ( '(origin|ose)-\\${component}', 'registry-console') | + replace ( '${version}', 'v' ~ openshift.common.short_version ) }}" pre_tasks: - name: Load lib_openshift modules @@ -22,7 +24,10 @@ selector: 'router' register: all_routers - - set_fact: haproxy_routers="{{ all_routers.results.results[0]['items'] | oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | oo_select_keys_from_list(['metadata']) }}" + - set_fact: + haproxy_routers: "{{ all_routers.results.results[0]['items'] | + oo_pods_match_component(openshift_deployment_type, 'haproxy-router') | + oo_select_keys_from_list(['metadata']) }}" when: - all_routers.results.returncode == 0 @@ -31,16 +36,15 @@ - all_routers.results.returncode != 0 - name: Update router image to current version + oc_edit: + kind: dc + name: "{{ item['labels']['deploymentconfig'] }}" + namespace: "{{ item['namespace'] }}" + content: + spec.template.spec.containers[0].image: "{{ router_image }}" + with_items: "{{ haproxy_routers }}" when: - all_routers.results.returncode == 0 - command: > - {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -n {{ item['namespace'] }} -p - '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}' - --api-version=v1 - with_items: "{{ haproxy_routers }}" - # AUDIT:changed_when_note: `false` not being set here. What we - # need to do is check the current router image version and see if - # this task needs to be ran. - name: Check for default registry oc_obj: @@ -50,15 +54,34 @@ register: _default_registry - name: Update registry image to current version + oc_edit: + kind: dc + name: docker-registry + namespace: default + content: + spec.template.spec.containers[0].image: "{{ registry_image }}" when: - _default_registry.results.results[0] != {} - command: > - {{ oc_cmd }} patch dc/docker-registry -n default -p - '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}' - --api-version=v1 - # AUDIT:changed_when_note: `false` not being set here. 
What we - # need to do is check the current registry image version and see - # if this task needs to be ran. + + - name: Check for registry-console + oc_obj: + state: list + kind: dc + name: registry-console + register: _registry_console + when: + - openshift.common.deployment_type != 'origin' + + - name: Update registry-console image to current version + oc_edit: + kind: dc + name: registry-console + namespace: default + content: + spec.template.spec.containers[0].image: "{{ registry_console_image }}" + when: + - openshift.common.deployment_type != 'origin' + - _registry_console.results.results[0] != {} roles: - openshift_manageiq @@ -96,6 +119,12 @@ - not grep_plugin_order_override | skipped - grep_plugin_order_override.rc == 0 -- include: ../reset_excluder.yml +- name: Re-enable excluder if it was previously enabled + hosts: oo_masters_to_config tags: - always + gather_facts: no + roles: + - role: openshift_excluder + r_openshift_excluder_action: enable + r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" diff --git a/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml new file mode 100644 index 000000000..9d8b73cff --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/pre/tasks/verify_docker_upgrade_targets.yml @@ -0,0 +1,20 @@ +--- +# Only check if docker upgrade is required if docker_upgrade is not +# already set to False. +- include: ../docker/upgrade_check.yml + when: docker_upgrade is not defined or docker_upgrade | bool and not openshift.common.is_atomic | bool + +# Additional checks for Atomic hosts: + +- name: Determine available Docker + shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker" + register: g_atomic_docker_version_result + when: openshift.common.is_atomic | bool + +- set_fact: + l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}" + when: openshift.common.is_atomic | bool + +- fail: + msg: This playbook requires access to Docker 1.12 or later + when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.12','<') diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml index 06eb5f936..45022cd61 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml @@ -9,23 +9,16 @@ local_facts: ha: "{{ groups.oo_masters_to_config | length > 1 }}" - - name: Ensure Master is running - service: - name: "{{ openshift.common.service_type }}-master" - state: started - enabled: yes - when: openshift.master.ha is defined and not openshift.master.ha | bool and openshift.common.is_containerized | bool - - name: Ensure HA Master is running service: name: "{{ openshift.common.service_type }}-master-api" state: started enabled: yes - when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool + when: openshift.common.is_containerized | bool - name: Ensure HA Master is running service: name: "{{ openshift.common.service_type }}-master-controllers" state: started enabled: yes - when: openshift.master.ha is defined and openshift.master.ha | bool and openshift.common.is_containerized | bool + when: 
openshift.common.is_containerized | bool diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml deleted file mode 100644 index 7646e0fa6..000000000 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -- name: Verify docker upgrade targets - hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config - tasks: - # Only check if docker upgrade is required if docker_upgrade is not - # already set to False. - - include: ../docker/upgrade_check.yml - when: docker_upgrade is not defined or docker_upgrade | bool and not openshift.common.is_atomic | bool - - # Additional checks for Atomic hosts: - - - name: Determine available Docker - shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker" - register: g_atomic_docker_version_result - when: openshift.common.is_atomic | bool - - - set_fact: - l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}" - when: openshift.common.is_atomic | bool - - - fail: - msg: This playbook requires access to Docker 1.12 or later - when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.12','<') diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml new file mode 100644 index 000000000..497709d25 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml @@ -0,0 +1,13 @@ +--- +- name: Verify Host Requirements + hosts: oo_all_hosts + roles: + - openshift_health_checker + vars: + - r_openshift_health_checker_playbook_context: upgrade + post_tasks: + - action: openshift_health_check + args: + checks: + - disk_availability + - memory_availability diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml deleted file mode 100644 index 354af3cde..000000000 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_nodes_running.yml +++ /dev/null @@ -1,13 +0,0 @@ ---- -- name: Verify node processes - hosts: oo_nodes_to_config - roles: - - openshift_facts - - openshift_docker_facts - tasks: - - name: Ensure Node is running - service: - name: "{{ openshift.common.service_type }}-node" - state: started - enabled: yes - when: openshift.common.is_containerized | bool diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml index c83923dae..9b4a8e413 100644 --- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml +++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml @@ -1,41 +1,43 @@ --- -- name: Verify upgrade targets - hosts: oo_masters_to_config:oo_nodes_to_upgrade - vars: - openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}" - pre_tasks: - - fail: - msg: Verify OpenShift is already installed - when: openshift.common.version is not defined +- name: Fail when OpenShift is not installed + fail: + msg: Verify OpenShift is already installed + when: openshift.common.version is not defined - - fail: - msg: Verify the correct version was found - when: verify_upgrade_version is defined and 
openshift_version != verify_upgrade_version +- name: Verify containers are available for upgrade + command: > + docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }} + register: pull_result + changed_when: "'Downloaded newer image' in pull_result.stdout" + when: openshift.common.is_containerized | bool - - set_fact: - g_new_service_name: "{{ 'origin' if deployment_type =='origin' else 'atomic-openshift' }}" - when: not openshift.common.is_containerized | bool +- when: not openshift.common.is_containerized | bool + block: + - name: Check latest available OpenShift RPM version + repoquery: + name: "{{ openshift.common.service_type }}" + ignore_excluders: true + register: repoquery_out - - name: Verify containers are available for upgrade - command: > - docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }} - register: pull_result - changed_when: "'Downloaded newer image' in pull_result.stdout" - when: openshift.common.is_containerized | bool + - name: Fail when unable to determine available OpenShift RPM version + fail: + msg: "Unable to determine available OpenShift RPM version" + when: + - not repoquery_out.results.package_found - - name: Check latest available OpenShift RPM version - command: > - {{ repoquery_cmd }} --qf '%{version}' "{{ openshift.common.service_type }}" - failed_when: false - changed_when: false - register: avail_openshift_version - when: not openshift.common.is_containerized | bool + - name: Set fact avail_openshift_version + set_fact: + avail_openshift_version: "{{ repoquery_out.results.versions.available_versions.0 }}" - name: Verify OpenShift RPMs are available for upgrade fail: - msg: "OpenShift {{ avail_openshift_version.stdout }} is available, but {{ openshift_upgrade_target }} or greater is required" - when: not openshift.common.is_containerized | bool and not avail_openshift_version | skipped and avail_openshift_version.stdout | default('0.0', True) | version_compare(openshift_release, '<') + msg: "OpenShift {{ avail_openshift_version }} is available, but {{ openshift_upgrade_target }} or greater is required" + when: + - avail_openshift_version | default('0.0', True) | version_compare(openshift_release, '<') - - fail: - msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later" - when: deployment_type == 'origin' and openshift.common.version | version_compare(openshift_upgrade_min,'<') +- name: Fail when openshift version does not meet the minimum requirement for Origin upgrade + fail: + msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later" + when: + - deployment_type == 'origin' + - openshift.common.version | version_compare(openshift_upgrade_min,'<') diff --git a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml index df2b664d4..164baca81 100644 --- a/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/rpm_upgrade.yml @@ -1,8 +1,39 @@ --- -# We verified latest rpm available is suitable, so just yum update. -- name: Upgrade packages - package: "name={{ openshift.common.service_type }}-{{ component }}{{ openshift_pkg_version }} state=present" +# When we update package "a-${version}" and a requires b >= ${version}, if we +# don't specify the version of b, yum will choose the latest version of b +# available and the whole set of dependencies ends up at the latest version.
+# Since the package module, unlike the yum module, doesn't flatten a list +# of packages into one transaction we need to do that explicitly. The ansible +# core team tells us not to rely on yum module transaction flattening anyway. -- name: Ensure python-yaml present for config upgrade - package: name=PyYAML state=present - when: not openshift.common.is_atomic | bool +# TODO: If the sdn package isn't already installed this will install it, we +# should fix that + +- name: Upgrade master packages + package: name={{ master_pkgs | join(',') }} state=present + vars: + master_pkgs: + - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-master{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version}}" + - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}" + - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" + - PyYAML + when: + - component == "master" + - not openshift.common.is_atomic | bool + +- name: Upgrade node packages + package: name={{ node_pkgs | join(',') }} state=present + vars: + node_pkgs: + - "{{ openshift.common.service_type }}{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version }}" + - "{{ openshift.common.service_type }}-clients{{ openshift_pkg_version }}" + - "tuned-profiles-{{ openshift.common.service_type }}-node{{ openshift_pkg_version }}" + - PyYAML + when: + - component == "node" + - not openshift.common.is_atomic | bool diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml index fd01a6625..18f10437d 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -2,17 +2,22 @@ ############################################################################### # Upgrade Masters ############################################################################### -- name: Evaluate additional groups for upgrade - hosts: localhost - connection: local - become: no + +# oc adm migrate storage should be run prior to etcd v3 upgrade +# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060 +- name: Pre master upgrade - Upgrade all storage + hosts: oo_first_master tasks: - - name: Evaluate etcd_hosts_to_backup - add_host: - name: "{{ item }}" - groups: etcd_hosts_to_backup - with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}" - changed_when: False + - name: Upgrade all storage + command: > + {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig + migrate storage --include=* --confirm + register: l_pb_upgrade_control_plane_pre_upgrade_storage + when: openshift_upgrade_pre_storage_migration_enabled | default(true,true) | bool + failed_when: + - openshift_upgrade_pre_storage_migration_enabled | default(true,true) | bool + - l_pb_upgrade_control_plane_pre_upgrade_storage.rc != 0 + - openshift_upgrade_pre_storage_migration_fatal | default(true,true) | bool # If facts cache were for some reason deleted, this fact may not be set, and if not set # it will always default to true. 
This causes problems for the etcd data dir fact detection @@ -64,6 +69,7 @@ static: yes roles: - openshift_facts + - lib_utils post_tasks: # Run the pre-upgrade hook if defined: @@ -85,7 +91,7 @@ - include_vars: ../../../../roles/openshift_master/vars/main.yml - - name: Update systemd units + - name: Remove any legacy systemd units and update systemd units include: ../../../../roles/openshift_master/tasks/systemd_units.yml - name: Check for ca-bundle.crt @@ -113,6 +119,13 @@ state: link when: ca_crt_stat.stat.isreg and not ca_bundle_stat.stat.exists + - name: Update oreg value + yedit: + src: "{{ openshift.common.config_base }}/master/master-config.yaml" + key: 'imageConfig.format' + value: "{{ oreg_url | default(oreg_url_master) }}" + when: oreg_url is defined or oreg_url_master is defined + # Run the upgrade hook prior to restarting services/system if defined: - debug: msg="Running master upgrade hook {{ openshift_master_upgrade_hook }}" when: openshift_master_upgrade_hook is defined @@ -133,6 +146,19 @@ - include: "{{ openshift_master_upgrade_post_hook }}" when: openshift_master_upgrade_post_hook is defined + - name: Post master upgrade - Upgrade clusterpolicies storage + command: > + {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig + migrate storage --include=clusterpolicies --confirm + register: l_pb_upgrade_control_plane_post_upgrade_storage + when: openshift_upgrade_post_storage_migration_enabled | default(true,true) | bool + failed_when: + - openshift_upgrade_post_storage_migration_enabled | default(true,true) | bool + - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0 + - openshift_upgrade_post_storage_migration_fatal | default(false,true) | bool + run_once: true + delegate_to: "{{ groups.oo_first_master.0 }}" + - set_fact: master_update_complete: True @@ -173,7 +199,11 @@ - name: Reconcile Cluster Roles command: > {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig - policy reconcile-cluster-roles --additive-only=true --confirm + policy reconcile-cluster-roles --additive-only=true --confirm -o name + register: reconcile_cluster_role_result + changed_when: + - reconcile_cluster_role_result.stdout != '' + - reconcile_cluster_role_result.rc == 0 run_once: true - name: Reconcile Cluster Role Bindings @@ -184,21 +214,45 @@ --exclude-groups=system:authenticated:oauth --exclude-groups=system:unauthenticated --exclude-users=system:anonymous - --additive-only=true --confirm + --additive-only=true --confirm -o name when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool + register: reconcile_bindings_result + changed_when: + - reconcile_bindings_result.stdout != '' + - reconcile_bindings_result.rc == 0 run_once: true - name: Reconcile Jenkins Pipeline Role Bindings command: > - {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-role-bindings system:build-strategy-jenkinspipeline --confirm + {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig policy reconcile-cluster-role-bindings system:build-strategy-jenkinspipeline --confirm -o name run_once: true + register: reconcile_jenkins_role_binding_result + changed_when: + - reconcile_jenkins_role_binding_result.stdout != '' + - reconcile_jenkins_role_binding_result.rc == 0 when: openshift.common.version_gte_3_4_or_1_4 | bool - name: Reconcile Security Context Constraints 
command: > - {{ openshift.common.client_binary }} adm policy reconcile-sccs --confirm --additive-only=true + {{ openshift.common.client_binary }} adm policy --config={{ openshift.common.config_base }}/master/admin.kubeconfig reconcile-sccs --confirm --additive-only=true -o name + register: reconcile_scc_result + changed_when: + - reconcile_scc_result.stdout != '' + - reconcile_scc_result.rc == 0 run_once: true + - name: Migrate storage post policy reconciliation + command: > + {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig + migrate storage --include=* --confirm + run_once: true + register: l_pb_upgrade_control_plane_post_upgrade_storage + when: openshift_upgrade_post_storage_migration_enabled | default(true,true) | bool + failed_when: + - openshift_upgrade_post_storage_migration_enabled | default(true,true) | bool + - l_pb_upgrade_control_plane_post_upgrade_storage.rc != 0 + - openshift_upgrade_post_storage_migration_fatal | default(false,true) | bool + - set_fact: reconcile_complete: True @@ -227,15 +281,15 @@ roles: - openshift_facts tasks: - - include: docker/upgrade.yml + - include: docker/tasks/upgrade.yml when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool - name: Drain and upgrade master nodes hosts: oo_masters_to_config:&oo_nodes_to_upgrade # This var must be set with -e on invocation, as it is not a per-host inventory var # and is evaluated early. Values such as "20%" can also be used. - serial: "{{ openshift_upgrade_nodes_serial | default(1) }}" - any_errors_fatal: true + serial: "{{ openshift_upgrade_control_plane_nodes_serial | default(1) }}" + max_fail_percentage: "{{ openshift_upgrade_control_plane_nodes_max_fail_percentage | default(0) }}" pre_tasks: - name: Load lib_openshift modules @@ -246,7 +300,7 @@ # or docker actually needs an upgrade before proceeding. Perhaps best to save this until # we merge upgrade functionality into the base roles and a normal config.yml playbook run. 
- name: Mark node unschedulable - oadm_manage_node: + oc_adm_manage_node: node: "{{ openshift.node.nodename | lower }}" schedulable: False delegate_to: "{{ groups.oo_first_master.0 }}" @@ -257,18 +311,23 @@ - name: Drain Node for Kubelet upgrade command: > - {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --force --delete-local-data --ignore-daemonsets + {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets delegate_to: "{{ groups.oo_first_master.0 }}" + register: l_upgrade_control_plane_drain_result + until: not l_upgrade_control_plane_drain_result | failed + retries: 60 + delay: 60 roles: - lib_openshift - openshift_facts - docker + - openshift_node_dnsmasq - openshift_node_upgrade post_tasks: - name: Set node schedulability - oadm_manage_node: + oc_adm_manage_node: node: "{{ openshift.node.nodename | lower }}" schedulable: True delegate_to: "{{ groups.oo_first_master.0 }}" diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml index 4e1838c71..c93a5d89c 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml @@ -4,7 +4,7 @@ # This var must be set with -e on invocation, as it is not a per-host inventory var # and is evaluated early. Values such as "20%" can also be used. serial: "{{ openshift_upgrade_nodes_serial | default(1) }}" - any_errors_fatal: true + max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}" pre_tasks: - name: Load lib_openshift modules @@ -15,7 +15,7 @@ # or docker actually needs an upgrade before proceeding. Perhaps best to save this until # we merge upgrade functionality into the base roles and a normal config.yml playbook run. 
- name: Mark node unschedulable - oadm_manage_node: + oc_adm_manage_node: node: "{{ openshift.node.nodename | lower }}" schedulable: False delegate_to: "{{ groups.oo_first_master.0 }}" @@ -26,18 +26,26 @@ - name: Drain Node for Kubelet upgrade command: > - {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --force --delete-local-data --ignore-daemonsets + {{ hostvars[groups.oo_first_master.0].openshift.common.admin_binary }} drain {{ openshift.node.nodename | lower }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig --force --delete-local-data --ignore-daemonsets delegate_to: "{{ groups.oo_first_master.0 }}" + register: l_upgrade_nodes_drain_result + until: not l_upgrade_nodes_drain_result | failed + retries: 60 + delay: 60 roles: - lib_openshift - openshift_facts - docker + - openshift_node_dnsmasq - openshift_node_upgrade + - role: openshift_excluder + r_openshift_excluder_action: enable + r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" post_tasks: - name: Set node schedulability - oadm_manage_node: + oc_adm_manage_node: node: "{{ openshift.node.nodename | lower }}" schedulable: True delegate_to: "{{ groups.oo_first_master.0 }}" @@ -46,7 +54,3 @@ register: node_schedulable until: node_schedulable|succeeded when: node_unschedulable|changed - -- include: ../reset_excluder.yml - tags: - - always diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml index 88f2ddc78..8558bf3e9 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_scheduler.yml @@ -63,31 +63,34 @@ - block: - debug: msg: "WARNING: openshift_master_scheduler_predicates is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_predicates }}" - when: "{{ openshift_master_scheduler_predicates in older_predicates + older_predicates_no_region + [prev_predicates] + [prev_predicates_no_region] }}" + when: openshift_master_scheduler_predicates in older_predicates + older_predicates_no_region + [prev_predicates] + [prev_predicates_no_region] - debug: msg: "WARNING: openshift_master_scheduler_predicates does not match current defaults of: {{ openshift_master_scheduler_default_predicates }}" - when: "{{ openshift_master_scheduler_predicates != openshift_master_scheduler_default_predicates }}" - when: "{{ openshift_master_scheduler_predicates | default(none) is not none }}" + when: openshift_master_scheduler_predicates != openshift_master_scheduler_default_predicates + when: openshift_master_scheduler_predicates | default(none) is not none # Handle cases where openshift_master_predicates is not defined - block: - debug: msg: "WARNING: existing scheduler config does not match previous known defaults automated upgrade of scheduler config is disabled.\nexisting scheduler predicates: {{ openshift_master_scheduler_current_predicates }}\ncurrent scheduler default predicates are: {{ openshift_master_scheduler_default_predicates }}" - when: "{{ openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates and - openshift_master_scheduler_current_predicates not in older_predicates + [prev_predicates] }}" + when: + - openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates + - openshift_master_scheduler_current_predicates not in older_predicates 
+ [prev_predicates] - set_fact: openshift_upgrade_scheduler_predicates: "{{ openshift_master_scheduler_default_predicates }}" - when: "{{ openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates and - openshift_master_scheduler_current_predicates in older_predicates + [prev_predicates] }}" + when: + - openshift_master_scheduler_current_predicates != openshift_master_scheduler_default_predicates + - openshift_master_scheduler_current_predicates in older_predicates + [prev_predicates] - set_fact: openshift_upgrade_scheduler_predicates: "{{ default_predicates_no_region }}" - when: "{{ openshift_master_scheduler_current_predicates != default_predicates_no_region and - openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region] }}" + when: + - openshift_master_scheduler_current_predicates != default_predicates_no_region + - openshift_master_scheduler_current_predicates in older_predicates_no_region + [prev_predicates_no_region] - when: "{{ openshift_master_scheduler_predicates | default(none) is none }}" + when: openshift_master_scheduler_predicates | default(none) is none # Upgrade priorities @@ -120,31 +123,34 @@ - block: - debug: msg: "WARNING: openshift_master_scheduler_priorities is set to defaults from an earlier release of OpenShift current defaults are: {{ openshift_master_scheduler_default_priorities }}" - when: "{{ openshift_master_scheduler_priorities in older_priorities + older_priorities_no_zone + [prev_priorities] + [prev_priorities_no_zone] }}" + when: openshift_master_scheduler_priorities in older_priorities + older_priorities_no_zone + [prev_priorities] + [prev_priorities_no_zone] - debug: msg: "WARNING: openshift_master_scheduler_priorities does not match current defaults of: {{ openshift_master_scheduler_default_priorities }}" - when: "{{ openshift_master_scheduler_priorities != openshift_master_scheduler_default_priorities }}" - when: "{{ openshift_master_scheduler_priorities | default(none) is not none }}" + when: openshift_master_scheduler_priorities != openshift_master_scheduler_default_priorities + when: openshift_master_scheduler_priorities | default(none) is not none # Handle cases where openshift_master_priorities is not defined - block: - debug: msg: "WARNING: existing scheduler config does not match previous known defaults automated upgrade of scheduler config is disabled.\nexisting scheduler priorities: {{ openshift_master_scheduler_current_priorities }}\ncurrent scheduler default priorities are: {{ openshift_master_scheduler_default_priorities }}" - when: "{{ openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities and - openshift_master_scheduler_current_priorities not in older_priorities + [prev_priorities] }}" + when: + - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities + - openshift_master_scheduler_current_priorities not in older_priorities + [prev_priorities] - set_fact: openshift_upgrade_scheduler_priorities: "{{ openshift_master_scheduler_default_priorities }}" - when: "{{ openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities and - openshift_master_scheduler_current_priorities in older_priorities + [prev_priorities] }}" + when: + - openshift_master_scheduler_current_priorities != openshift_master_scheduler_default_priorities + - openshift_master_scheduler_current_priorities in older_priorities + [prev_priorities] - set_fact: 
openshift_upgrade_scheduler_priorities: "{{ default_priorities_no_zone }}" - when: "{{ openshift_master_scheduler_current_priorities != default_priorities_no_zone and - openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone] }}" + when: + - openshift_master_scheduler_current_priorities != default_priorities_no_zone + - openshift_master_scheduler_current_priorities in older_priorities_no_zone + [prev_priorities_no_zone] - when: "{{ openshift_master_scheduler_priorities | default(none) is none }}" + when: openshift_master_scheduler_priorities | default(none) is none # Update scheduler @@ -162,5 +168,6 @@ content: "{{ scheduler_config | to_nice_json }}" dest: "{{ openshift_master_scheduler_conf }}" backup: true - when: "{{ openshift_upgrade_scheduler_predicates is defined or - openshift_upgrade_scheduler_priorities is defined }}" + when: > + openshift_upgrade_scheduler_predicates is defined or + openshift_upgrade_scheduler_priorities is defined diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml index 68c71a132..d69472fad 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml @@ -53,7 +53,7 @@ dest: "{{ openshift.common.config_base}}/master/master-config.yaml" yaml_key: 'admissionConfig.pluginConfig' yaml_value: "{{ openshift.master.admission_plugin_config }}" - when: "{{ 'admission_plugin_config' in openshift.master }}" + when: "'admission_plugin_config' in openshift.master" - modify_yaml: dest: "{{ openshift.common.config_base}}/master/master-config.yaml" diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/roles b/playbooks/common/openshift-cluster/upgrades/v3_3/roles index 6bc1a7aef..6bc1a7aef 120000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_3/roles +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/roles diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml new file mode 100644 index 000000000..a241ef039 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml @@ -0,0 +1,118 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" + +# Pre-upgrade + +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos and initialize facts on all hosts + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | 
bool + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../pre/tasks/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_3/master_config_upgrade.yml" + +- include: ../upgrade_nodes.yml + vars: + node_config_hook: "v3_3/node_config_upgrade.yml" + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml new file mode 100644 index 000000000..54c85f0fb --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml @@ -0,0 +1,118 @@ +--- +# +# Control Plane Upgrade Playbook +# +# Upgrades masters and Docker (only on standalone etcd hosts) +# +# This upgrade does not include: +# - node service running on masters +# - docker running on masters +# - node service running on dedicated nodes +# +# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. 
+# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on control plane hosts + hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_masters_to_config + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../pre/tasks/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_3/master_config_upgrade.yml" + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml new file mode 100644 index 000000000..cee4e9087 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml @@ -0,0 +1,113 @@ +--- +# +# Node Upgrade Playbook +# +# Upgrades nodes only, but requires the control plane to have already been upgraded. 
+# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on nodes + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + roles: + - openshift_repos + tags: + - pre_upgrade + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- name: Verify masters are already upgraded + hosts: oo_masters_to_config + tags: + - pre_upgrade + tasks: + - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." + when: openshift.common.version != openshift_version + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_nodes_to_upgrade + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_nodes_to_upgrade + tasks: + - include: ../pre/tasks/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. 
+- name: Cleanup unused Docker images + hosts: oo_nodes_to_upgrade + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_nodes.yml + vars: + node_config_hook: "v3_3/node_config_upgrade.yml" diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml index 43c2ffcd4..ed89dbe8d 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml @@ -3,7 +3,7 @@ dest: "{{ openshift.common.config_base}}/master/master-config.yaml" yaml_key: 'admissionConfig.pluginConfig' yaml_value: "{{ openshift.master.admission_plugin_config }}" - when: "{{ 'admission_plugin_config' in openshift.master }}" + when: "'admission_plugin_config' in openshift.master" - modify_yaml: dest: "{{ openshift.common.config_base}}/master/master-config.yaml" diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/roles b/playbooks/common/openshift-cluster/upgrades/v3_4/roles index 6bc1a7aef..6bc1a7aef 120000 --- a/playbooks/byo/openshift-cluster/upgrades/v3_4/roles +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/roles diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml new file mode 100644 index 000000000..ae217ba2e --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml @@ -0,0 +1,116 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + +# Pre-upgrade + +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos and initialize facts on all hosts + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades 
which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../pre/tasks/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_4/master_config_upgrade.yml" + +- include: ../upgrade_nodes.yml + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml new file mode 100644 index 000000000..d7cb38d03 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml @@ -0,0 +1,118 @@ +--- +# +# Control Plane Upgrade Playbook +# +# Upgrades masters and Docker (only on standalone etcd hosts) +# +# This upgrade does not include: +# - node service running on masters +# - docker running on masters +# - node service running on dedicated nodes +# +# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. 
+# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on control plane hosts + hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_masters_to_config + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../pre/tasks/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_4/master_config_upgrade.yml" + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml new file mode 100644 index 000000000..8531e6045 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml @@ -0,0 +1,111 @@ +--- +# +# Node Upgrade Playbook +# +# Upgrades nodes only, but requires the control plane to have already been upgraded. 
+# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on nodes + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + roles: + - openshift_repos + tags: + - pre_upgrade + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- name: Verify masters are already upgraded + hosts: oo_masters_to_config + tags: + - pre_upgrade + tasks: + - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." + when: openshift.common.version != openshift_version + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_nodes_to_upgrade + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_nodes_to_upgrade + tasks: + - include: ../pre/tasks/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. 
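(The shared ../cleanup_unused_images.yml tasks file referenced below is not shown here. As a rough illustration only, such a cleanup could amount to something like the following task, assuming a plain docker CLI on the hosts; the real include may differ:)

# Hypothetical stand-in for ../cleanup_unused_images.yml -- illustration, not part of this change
- name: Delete dangling Docker images
  shell: docker images -q --filter dangling=true | xargs --no-run-if-empty docker rmi
  changed_when: false
  failed_when: false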
+- name: Cleanup unused Docker images + hosts: oo_nodes_to_upgrade + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml new file mode 100644 index 000000000..ed89dbe8d --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml @@ -0,0 +1,16 @@ +--- +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'admissionConfig.pluginConfig' + yaml_value: "{{ openshift.master.admission_plugin_config }}" + when: "'admission_plugin_config' in openshift.master" + +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'admissionConfig.pluginOrderOverride' + yaml_value: + +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'kubernetesMasterConfig.admissionConfig' + yaml_value: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml deleted file mode 100644 index 48c69eccd..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -############################################################################### -# Post upgrade - Upgrade job storage -############################################################################### -- name: Upgrade job storage - hosts: oo_first_master - roles: - - { role: openshift_cli } - vars: - # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe - # restart. - skip_docker_role: True - tasks: - - name: Upgrade job storage - command: > - {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig - migrate storage --include=jobs --confirm - run_once: true diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml new file mode 100644 index 000000000..a3d0d6305 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml @@ -0,0 +1,118 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" + openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + +# Pre-upgrade + +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos and initialize facts on all hosts + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool + +- 
include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../pre/tasks/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: validator.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + +- include: ../upgrade_nodes.yml + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml new file mode 100644 index 000000000..5fee56615 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml @@ -0,0 +1,122 @@ +--- +# +# Control Plane Upgrade Playbook +# +# Upgrades masters and Docker (only on standalone etcd hosts) +# +# This upgrade does not include: +# - node service running on masters +# - docker running on masters +# - node service running on dedicated nodes +# +# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. 
+# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" + openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on control plane hosts + hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_masters_to_config + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../pre/tasks/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: validator.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. 
+- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_5/master_config_upgrade.yml" + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml new file mode 100644 index 000000000..e29d0f8e6 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml @@ -0,0 +1,111 @@ +--- +# +# Node Upgrade Playbook +# +# Upgrades nodes only, but requires the control plane to have already been upgraded. +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" + openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on nodes + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + roles: + - openshift_repos + tags: + - pre_upgrade + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- name: Verify masters are already upgraded + hosts: oo_masters_to_config + tags: + - pre_upgrade + tasks: + - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." 
+ when: openshift.common.version != openshift_version + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_nodes_to_upgrade + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_nodes_to_upgrade + tasks: + - include: ../pre/tasks/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_nodes_to_upgrade + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml new file mode 100644 index 000000000..ae63c9ca9 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/validator.yml @@ -0,0 +1,67 @@ +--- +############################################################################### +# Pre upgrade checks for known data problems, if this playbook fails you should +# contact support. If you're not supported contact users@lists.openshift.com +# +# oc_objectvalidator provides these two checks +# 1 - SDN Data issues, never seen in the wild but known possible due to code audits +# https://github.com/openshift/origin/issues/12697 +# 2 - Namespace protections, https://bugzilla.redhat.com/show_bug.cgi?id=1428934 +# +############################################################################### +- name: Verify 3.5 specific upgrade checks + hosts: oo_first_master + roles: + - { role: lib_openshift } + tasks: + - name: Check for invalid namespaces and SDN errors + oc_objectvalidator: + + # What's all this PetSet business about? + # + # 'PetSets' were ALPHA resources in Kube <= 3.4. In >= 3.5 they are + # no longer supported. The BETA resource 'StatefulSets' replaces + # them. We can't migrate clients PetSets to + # StatefulSets. Additionally, Red Hat has never officially supported + # these resource types. Sorry users, but if you were using + # unsupported resources from the Kube documentation then we can't + # help you at this time. + # + # Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1428229 + - name: Check if legacy PetSets exist + oc_obj: + state: list + all_namespaces: true + kind: petsets + register: l_do_petsets_exist + + - name: Fail on unsupported resource migration 'PetSets' + fail: + msg: > + PetSet objects were detected in your cluster. These are an + Alpha feature in upstream Kubernetes 1.4 and are not supported + by Red Hat. In Kubernetes 1.5, they are replaced by the Beta + feature StatefulSets. Red Hat currently does not offer support + for either PetSets or StatefulSets. + + Automatically migrating PetSets to StatefulSets in OpenShift + Container Platform (OCP) 3.5 is not supported. See the + Kubernetes "Upgrading from PetSets to StatefulSets" + documentation for additional information: + + https://kubernetes.io/docs/tasks/manage-stateful-set/upgrade-pet-set-to-stateful-set/ + + PetSets MUST be removed before upgrading to OCP 3.5. Red Hat + strongly recommends reading the above referenced documentation + in its entirety before taking any destructive actions. 
+ + If you want to simply remove all PetSets without manually + migrating to StatefulSets, run this command as a user with + cluster-admin privileges: + + $ oc get petsets --all-namespaces -o yaml | oc delete -f - --cascade=false + when: + # Search did not fail, valid resource type found + - l_do_petsets_exist.results.returncode == 0 + # Items do exist in the search results + - l_do_petsets_exist.results.results.0['items'] | length > 0 diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_6/filter_plugins new file mode 120000 index 000000000..7de3c1dd7 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/filter_plugins @@ -0,0 +1 @@ +../../../../../filter_plugins/
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml new file mode 100644 index 000000000..ed89dbe8d --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml @@ -0,0 +1,16 @@ +--- +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'admissionConfig.pluginConfig' + yaml_value: "{{ openshift.master.admission_plugin_config }}" + when: "'admission_plugin_config' in openshift.master" + +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'admissionConfig.pluginOrderOverride' + yaml_value: + +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'kubernetesMasterConfig.admissionConfig' + yaml_value: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/roles b/playbooks/common/openshift-cluster/upgrades/v3_6/roles new file mode 120000 index 000000000..415645be6 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/roles @@ -0,0 +1 @@ +../../../../../roles/
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml new file mode 100644 index 000000000..51acd17da --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml @@ -0,0 +1,122 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: '3.6' + openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" + +# Pre-upgrade + +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos and initialize facts on all hosts + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- include: ../pre/verify_health_checks.yml + tags: + - pre_upgrade + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../pre/tasks/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: validator.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. 
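(For readability: the openshift_no_proxy_internal_hostnames fact set earlier in this playbook collapses the node, master and etcd groups into one comma-separated string of hostnames. With three hypothetical hosts the result would look roughly like the following; the hostnames here are made up:)

openshift_no_proxy_internal_hostnames: "master1.example.com,node1.example.com,etcd1.example.com"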
+- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + +- include: ../upgrade_nodes.yml + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml new file mode 100644 index 000000000..9fe059ac9 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml @@ -0,0 +1,122 @@ +--- +# +# Control Plane Upgrade Playbook +# +# Upgrades masters and Docker (only on standalone etcd hosts) +# +# This upgrade does not include: +# - node service running on masters +# - docker running on masters +# - node service running on dedicated nodes +# +# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: '3.6' + openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on control plane hosts + hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. 
+ skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_masters_to_config + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../pre/tasks/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: validator.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_6/master_config_upgrade.yml" + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml new file mode 100644 index 000000000..1b10d4e37 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml @@ -0,0 +1,111 @@ +--- +# +# Node Upgrade Playbook +# +# Upgrades nodes only, but requires the control plane to have already been upgraded. +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: '3.6' + openshift_upgrade_min: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}" + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on nodes + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + roles: + - openshift_repos + tags: + - pre_upgrade + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. 
At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- name: Verify masters are already upgraded + hosts: oo_masters_to_config + tags: + - pre_upgrade + tasks: + - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." + when: openshift.common.version != openshift_version + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_nodes_to_upgrade + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_nodes_to_upgrade + tasks: + - include: ../pre/tasks/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_nodes_to_upgrade + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml new file mode 100644 index 000000000..78c1767b8 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/validator.yml @@ -0,0 +1,12 @@ +--- +############################################################################### +# Pre upgrade checks for known data problems, if this playbook fails you should +# contact support. If you're not supported contact users@lists.openshift.com +############################################################################### +- name: Verify 3.6 specific upgrade checks + hosts: oo_first_master + roles: + - { role: lib_openshift } + tasks: + - name: Check for invalid namespaces and SDN errors + oc_objectvalidator: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins b/playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins new file mode 120000 index 000000000..7de3c1dd7 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/filter_plugins @@ -0,0 +1 @@ +../../../../../filter_plugins/
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml new file mode 100644 index 000000000..ed89dbe8d --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml @@ -0,0 +1,16 @@ +--- +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'admissionConfig.pluginConfig' + yaml_value: "{{ openshift.master.admission_plugin_config }}" + when: "'admission_plugin_config' in openshift.master" + +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'admissionConfig.pluginOrderOverride' + yaml_value: + +- modify_yaml: + dest: "{{ openshift.common.config_base}}/master/master-config.yaml" + yaml_key: 'kubernetesMasterConfig.admissionConfig' + yaml_value: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/roles b/playbooks/common/openshift-cluster/upgrades/v3_7/roles new file mode 120000 index 000000000..415645be6 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/roles @@ -0,0 +1 @@ +../../../../../roles/
\ No newline at end of file diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml new file mode 100644 index 000000000..9ec40723a --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml @@ -0,0 +1,122 @@ +--- +# +# Full Control Plane + Nodes Upgrade +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: '3.7' + openshift_upgrade_min: '3.6' + +# Pre-upgrade + +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos and initialize facts on all hosts + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. + skip_docker_role: True + +- include: ../pre/verify_health_checks.yml + tags: + - pre_upgrade + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../pre/tasks/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: validator.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. 
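(These common v3_7 playbooks are normally entered through thin wrappers under playbooks/byo/openshift-cluster/upgrades/. The byo wrappers themselves are not shown here; a hypothetical one would be little more than:)

# playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml (hypothetical path, sketch only)
- include: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade.yml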
+- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + +- include: ../upgrade_nodes.yml + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml new file mode 100644 index 000000000..f97f34c3b --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml @@ -0,0 +1,122 @@ +--- +# +# Control Plane Upgrade Playbook +# +# Upgrades masters and Docker (only on standalone etcd hosts) +# +# This upgrade does not include: +# - node service running on masters +# - docker running on masters +# - node service running on dedicated nodes +# +# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately. +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: '3.7' + openshift_upgrade_min: '3.6' + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on control plane hosts + hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config + tags: + - pre_upgrade + roles: + - openshift_repos + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_master_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. 
+ skip_docker_role: True + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- include: ../../../openshift-master/validate_restart.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_masters_to_config + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../pre/tasks/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: validator.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_masters_to_config:oo_etcd_to_config + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_control_plane.yml + vars: + master_config_hook: "v3_7/master_config_upgrade.yml" + +- include: ../post_control_plane.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml new file mode 100644 index 000000000..e95b90cd5 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml @@ -0,0 +1,111 @@ +--- +# +# Node Upgrade Playbook +# +# Upgrades nodes only, but requires the control plane to have already been upgraded. +# +- include: ../init.yml + tags: + - pre_upgrade + +- name: Configure the upgrade target for the common upgrade tasks + hosts: oo_all_hosts + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_upgrade_target: '3.7' + openshift_upgrade_min: '3.6' + +# Pre-upgrade +- include: ../initialize_nodes_to_upgrade.yml + tags: + - pre_upgrade + +- name: Update repos on nodes + hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config + roles: + - openshift_repos + tags: + - pre_upgrade + +- name: Set openshift_no_proxy_internal_hostnames + hosts: oo_masters_to_config:oo_nodes_to_upgrade + tags: + - pre_upgrade + tasks: + - set_fact: + openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade'] + | union(groups['oo_masters_to_config']) + | union(groups['oo_etcd_to_config'] | default([]))) + | oo_collect('openshift.common.hostname') | default([]) | join (',') + }}" + when: + - openshift_http_proxy is defined or openshift_https_proxy is defined + - openshift_generate_no_proxy_hosts | default(True) | bool + +- include: ../pre/verify_inventory_vars.yml + tags: + - pre_upgrade + +- include: ../disable_node_excluders.yml + tags: + - pre_upgrade + +- include: ../../initialize_openshift_version.yml + tags: + - pre_upgrade + vars: + # Request specific openshift_release and let the openshift_version role handle converting this + # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if + # defined, and overriding the normal behavior of protecting the installed version + openshift_release: "{{ openshift_upgrade_target }}" + openshift_protect_installed_version: False + + # We skip the docker role at this point in upgrade to prevent + # unintended package, container, or config upgrades which trigger + # docker restarts. At this early stage of upgrade we can assume + # docker is configured and running. 
+ skip_docker_role: True + +- name: Verify masters are already upgraded + hosts: oo_masters_to_config + tags: + - pre_upgrade + tasks: + - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run." + when: openshift.common.version != openshift_version + +- include: ../pre/verify_control_plane_running.yml + tags: + - pre_upgrade + +- name: Verify upgrade targets + hosts: oo_nodes_to_upgrade + tasks: + - include: ../pre/verify_upgrade_targets.yml + tags: + - pre_upgrade + +- name: Verify docker upgrade targets + hosts: oo_nodes_to_upgrade + tasks: + - include: ../pre/tasks/verify_docker_upgrade_targets.yml + tags: + - pre_upgrade + +- include: ../pre/gate_checks.yml + tags: + - pre_upgrade + +# Pre-upgrade completed, nothing after this should be tagged pre_upgrade. + +# Separate step so we can execute in parallel and clear out anything unused +# before we get into the serialized upgrade process which will then remove +# remaining images if possible. +- name: Cleanup unused Docker images + hosts: oo_nodes_to_upgrade + tasks: + - include: ../cleanup_unused_images.yml + +- include: ../upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml new file mode 100644 index 000000000..f76fc68d1 --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml @@ -0,0 +1,23 @@ +--- +############################################################################### +# Pre upgrade checks for known data problems, if this playbook fails you should +# contact support. If you're not supported contact users@lists.openshift.com +############################################################################### +- name: Verify 3.7 specific upgrade checks + hosts: oo_first_master + roles: + - { role: lib_openshift } + + tasks: + - name: Check for invalid namespaces and SDN errors + oc_objectvalidator: + + - name: Confirm OpenShift authorization objects are in sync + command: > + {{ openshift.common.client_binary }} adm migrate authorization + when: not openshift.common.version_gte_3_7 | bool + changed_when: false + register: l_oc_result + until: l_oc_result.rc == 0 + retries: 4 + delay: 15 diff --git a/playbooks/common/openshift-cluster/validate_hostnames.yml b/playbooks/common/openshift-cluster/validate_hostnames.yml index 48cc03b19..be2e6a15a 100644 --- a/playbooks/common/openshift-cluster/validate_hostnames.yml +++ b/playbooks/common/openshift-cluster/validate_hostnames.yml @@ -1,16 +1,23 @@ --- -- name: Gather and set facts for node hosts +- name: Validate node hostnames hosts: oo_nodes_to_config - roles: - - openshift_facts tasks: - - shell: + - name: Query DNS for IP address of {{ openshift.common.hostname }} + shell: getent ahostsv4 {{ openshift.common.hostname }} | head -n 1 | awk '{ print $1 }' register: lookupip changed_when: false failed_when: false - name: Warn user about bad openshift_hostname values pause: - prompt: "The hostname \"{{ openshift.common.hostname }}\" for \"{{ ansible_nodename }}\" doesn't resolve to an ip address owned by this host. Please set openshift_hostname variable to a hostname that when resolved on the host in question resolves to an IP address matching an interface on this host. This host will fail liveness checks for pods utilizing hostPorts, press ENTER to continue or CTRL-C to abort." 
+ prompt: + The hostname {{ openshift.common.hostname }} for {{ ansible_nodename }} + doesn't resolve to an IP address owned by this host. Please set + openshift_hostname variable to a hostname that when resolved on the host + in question resolves to an IP address matching an interface on this + host. This host will fail liveness checks for pods utilizing hostPorts, + press ENTER to continue or CTRL-C to abort. seconds: "{{ 10 if openshift_override_hostname_check | default(false) | bool else omit }}" - when: lookupip.stdout not in ansible_all_ipv4_addresses + when: + - lookupip.stdout != '127.0.0.1' + - lookupip.stdout not in ansible_all_ipv4_addresses diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml index 1b8106e0e..f2b85eea1 100644 --- a/playbooks/common/openshift-etcd/config.yml +++ b/playbooks/common/openshift-etcd/config.yml @@ -3,8 +3,10 @@ hosts: oo_etcd_to_config any_errors_fatal: true roles: + - role: os_firewall - role: openshift_etcd etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" - role: nickhammond.logrotate diff --git a/playbooks/common/openshift-etcd/migrate.yml b/playbooks/common/openshift-etcd/migrate.yml new file mode 100644 index 000000000..a2af7bb21 --- /dev/null +++ b/playbooks/common/openshift-etcd/migrate.yml @@ -0,0 +1,147 @@ +--- +- name: Run pre-checks + hosts: oo_etcd_to_migrate + roles: + - role: etcd_migrate + r_etcd_migrate_action: check + r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" + etcd_peer: "{{ ansible_default_ipv4.address }}" + +# TODO: This will be different for release-3.6 branch +- name: Prepare masters for etcd data migration + hosts: oo_masters_to_config + tasks: + - set_fact: + master_services: + - "{{ openshift.common.service_type + '-master-controllers' }}" + - "{{ openshift.common.service_type + '-master-api' }}" + - debug: + msg: "master service name: {{ master_services }}" + - name: Stop masters + service: + name: "{{ item }}" + state: stopped + with_items: "{{ master_services }}" + +- name: Backup v2 data + hosts: oo_etcd_to_migrate + gather_facts: no + roles: + - role: openshift_facts + - role: etcd_common + r_etcd_common_action: backup + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + r_etcd_common_backup_tag: pre-migration + r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" + r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}" + +- name: Gate on etcd backup + hosts: localhost + connection: local + become: no + tasks: + - set_fact: + etcd_backup_completed: "{{ hostvars + | oo_select_keys(groups.oo_etcd_to_migrate) + | oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}" + - set_fact: + etcd_backup_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_backup_completed) }}" + - fail: + msg: "Migration cannot continue. 
The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}" + when: + - etcd_backup_failed | length > 0 + +- name: Stop etcd + hosts: oo_etcd_to_migrate + gather_facts: no + pre_tasks: + - set_fact: + l_etcd_service: "{{ 'etcd_container' if openshift.common.is_containerized else 'etcd' }}" + - name: Disable etcd members + service: + name: "{{ l_etcd_service }}" + state: stopped + +- name: Migrate data on first etcd + hosts: oo_etcd_to_migrate[0] + gather_facts: no + roles: + - role: etcd_migrate + r_etcd_migrate_action: migrate + r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" + etcd_peer: "{{ ansible_default_ipv4.address }}" + etcd_url_scheme: "https" + etcd_peer_url_scheme: "https" + +- name: Clean data stores on remaining etcd hosts + hosts: oo_etcd_to_migrate[1:] + gather_facts: no + roles: + - role: etcd_migrate + r_etcd_migrate_action: clean_data + r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" + etcd_peer: "{{ ansible_default_ipv4.address }}" + etcd_url_scheme: "https" + etcd_peer_url_scheme: "https" + post_tasks: + - name: Add etcd hosts + delegate_to: localhost + add_host: + name: "{{ item }}" + groups: oo_new_etcd_to_config + ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" + ansible_become: "{{ g_sudo | default(omit) }}" + with_items: "{{ groups.oo_etcd_to_migrate[1:] | default([]) }}" + changed_when: no + - name: Set success + set_fact: + r_etcd_migrate_success: true + +- include: ./scaleup.yml + +- name: Gate on etcd migration + hosts: oo_masters_to_config + gather_facts: no + tasks: + - set_fact: + etcd_migration_completed: "{{ hostvars + | oo_select_keys(groups.oo_etcd_to_migrate) + | oo_collect('inventory_hostname', {'r_etcd_migrate_success': true}) }}" + - set_fact: + etcd_migration_failed: "{{ groups.oo_etcd_to_migrate | difference(etcd_migration_completed) }}" + +- name: Add TTLs on the first master + hosts: oo_first_master[0] + roles: + - role: etcd_migrate + r_etcd_migrate_action: add_ttls + etcd_peer: "{{ hostvars[groups.oo_etcd_to_migrate.0].ansible_default_ipv4.address }}" + etcd_url_scheme: "https" + etcd_peer_url_scheme: "https" + when: etcd_migration_failed | length == 0 + +- name: Configure masters if etcd data migration is successful + hosts: oo_masters_to_config + roles: + - role: etcd_migrate + r_etcd_migrate_action: configure + when: etcd_migration_failed | length == 0 + tasks: + - debug: + msg: "Skipping master re-configuration since migration failed." + when: + - etcd_migration_failed | length > 0 + - name: Start master services + service: + name: "{{ item }}" + state: started + register: service_status + # Sometimes the master-api or master-controllers service fails to start on the first attempt + until: service_status.state is defined and service_status.state == "started" + retries: 5 + delay: 10 + with_items: "{{ master_services[::-1] }}" + - fail: + msg: "Migration failed. 
The following hosts were not properly migrated: {{ etcd_migration_failed | join(',') }}" + when: + - etcd_migration_failed | length > 0 diff --git a/playbooks/common/openshift-etcd/restart.yml b/playbooks/common/openshift-etcd/restart.yml index 196c86f28..af1ef245a 100644 --- a/playbooks/common/openshift-etcd/restart.yml +++ b/playbooks/common/openshift-etcd/restart.yml @@ -5,5 +5,5 @@ tasks: - name: restart etcd service: - name: "{{ 'etcd' if not openshift.common.is_containerized | bool else 'etcd_container' }}" + name: "{{ 'etcd_container' if openshift.common.etcd_runtime == 'docker' else 'etcd' }}" state: restarted diff --git a/playbooks/common/openshift-etcd/scaleup.yml b/playbooks/common/openshift-etcd/scaleup.yml new file mode 100644 index 000000000..5f8bb1c7a --- /dev/null +++ b/playbooks/common/openshift-etcd/scaleup.yml @@ -0,0 +1,55 @@ +--- +- name: Gather facts + hosts: oo_etcd_to_config:oo_new_etcd_to_config + roles: + - openshift_etcd_facts + post_tasks: + - set_fact: + etcd_hostname: "{{ etcd_hostname }}" + etcd_ip: "{{ etcd_ip }}" + +- name: Configure etcd + hosts: oo_new_etcd_to_config + serial: 1 + any_errors_fatal: true + vars: + etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" + pre_tasks: + - name: Add new etcd members to cluster + command: > + /usr/bin/etcdctl --cert-file {{ etcd_peer_cert_file }} + --key-file {{ etcd_peer_key_file }} + --ca-file {{ etcd_peer_ca_file }} + -C {{ etcd_peer_url_scheme }}://{{ hostvars[etcd_ca_host].etcd_hostname }}:{{ etcd_client_port }} + member add {{ etcd_hostname }} {{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }} + delegate_to: "{{ etcd_ca_host }}" + register: etcd_add_check + retries: 3 + delay: 10 + until: etcd_add_check.rc == 0 + roles: + - role: os_firewall + when: etcd_add_check.rc == 0 + - role: openshift_etcd + when: etcd_add_check.rc == 0 + etcd_peers: "{{ groups.oo_etcd_to_config | union(groups.oo_new_etcd_to_config)| default([], true) }}" + etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" + etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" + etcd_initial_cluster_state: "existing" + initial_etcd_cluster: "{{ etcd_add_check.stdout_lines[3] | regex_replace('ETCD_INITIAL_CLUSTER=','') | regex_replace('\"','') }}" + etcd_ca_setup: False + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" + - role: nickhammond.logrotate + when: etcd_add_check.rc == 0 + post_tasks: + - name: Verify cluster is stable + command: > + /usr/bin/etcdctl --cert-file {{ etcd_peer_cert_file }} + --key-file {{ etcd_peer_key_file }} + --ca-file {{ etcd_peer_ca_file }} + -C {{ etcd_peer_url_scheme }}://{{ hostvars[etcd_ca_host].etcd_hostname }}:{{ etcd_client_port }} + cluster-health + register: scaleup_health + retries: 3 + delay: 30 + until: scaleup_health.rc == 0 diff --git a/playbooks/common/openshift-etcd/service.yml b/playbooks/common/openshift-etcd/service.yml deleted file mode 100644 index ced4bddc5..000000000 --- a/playbooks/common/openshift-etcd/service.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -- name: Populate g_service_masters host group if needed - hosts: localhost - connection: local - become: no - gather_facts: no - tasks: - - fail: msg="new_cluster_state is required to be injected in this playbook" - when: new_cluster_state is not defined - - - name: Evaluate g_service_etcd - add_host: - name: "{{ item }}" - groups: g_service_etcd - with_items: "{{ oo_host_group_exp | default([]) }}" - changed_when: False - -- name: Change etcd state on etcd instance(s) - hosts: g_service_etcd - 
connection: ssh - gather_facts: no - tasks: - - service: name=etcd state="{{ new_cluster_state }}" diff --git a/playbooks/common/openshift-glusterfs/config.yml b/playbooks/common/openshift-glusterfs/config.yml new file mode 100644 index 000000000..d9de578f3 --- /dev/null +++ b/playbooks/common/openshift-glusterfs/config.yml @@ -0,0 +1,26 @@ +--- +- name: Open firewall ports for GlusterFS nodes + hosts: glusterfs + tasks: + - include_role: + name: openshift_storage_glusterfs + tasks_from: firewall.yml + when: + - openshift_storage_glusterfs_is_native | default(True) | bool + +- name: Open firewall ports for GlusterFS registry nodes + hosts: glusterfs_registry + tasks: + - include_role: + name: openshift_storage_glusterfs + tasks_from: firewall.yml + when: + - openshift_storage_glusterfs_registry_is_native | default(True) | bool + +- name: Configure GlusterFS + hosts: oo_first_master + tasks: + - name: setup glusterfs + include_role: + name: openshift_storage_glusterfs + when: groups.oo_glusterfs_to_config | default([]) | count > 0 diff --git a/playbooks/openstack/openshift-cluster/filter_plugins b/playbooks/common/openshift-glusterfs/filter_plugins index 99a95e4ca..99a95e4ca 120000 --- a/playbooks/openstack/openshift-cluster/filter_plugins +++ b/playbooks/common/openshift-glusterfs/filter_plugins diff --git a/playbooks/libvirt/openshift-cluster/lookup_plugins b/playbooks/common/openshift-glusterfs/lookup_plugins index ac79701db..ac79701db 120000 --- a/playbooks/libvirt/openshift-cluster/lookup_plugins +++ b/playbooks/common/openshift-glusterfs/lookup_plugins diff --git a/playbooks/common/openshift-glusterfs/registry.yml b/playbooks/common/openshift-glusterfs/registry.yml new file mode 100644 index 000000000..80cf7529e --- /dev/null +++ b/playbooks/common/openshift-glusterfs/registry.yml @@ -0,0 +1,49 @@ +--- +- include: config.yml + +- name: Initialize GlusterFS registry PV and PVC vars + hosts: oo_first_master + tags: hosted + tasks: + - set_fact: + glusterfs_pv: [] + glusterfs_pvc: [] + + - set_fact: + glusterfs_pv: + - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-volume" + capacity: "{{ openshift.hosted.registry.storage.volume.size }}" + access_modes: "{{ openshift.hosted.registry.storage.access.modes }}" + storage: + glusterfs: + endpoints: "{{ openshift.hosted.registry.storage.glusterfs.endpoints }}" + path: "{{ openshift.hosted.registry.storage.glusterfs.path }}" + readOnly: "{{ openshift.hosted.registry.storage.glusterfs.readOnly }}" + glusterfs_pvc: + - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-claim" + capacity: "{{ openshift.hosted.registry.storage.volume.size }}" + access_modes: "{{ openshift.hosted.registry.storage.access.modes }}" + when: openshift.hosted.registry.storage.glusterfs.swap + +- name: Create persistent volumes + hosts: oo_first_master + tags: + - hosted + vars: + persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups, glusterfs_pv) }}" + persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims(glusterfs_pvc) }}" + roles: + - role: openshift_persistent_volumes + when: persistent_volumes | union(glusterfs_pv) | length > 0 or persistent_volume_claims | union(glusterfs_pvc) | length > 0 + +- name: Create Hosted Resources + hosts: oo_first_master + tags: + - hosted + pre_tasks: + - set_fact: + openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}" + openshift_hosted_registry_registryurl: "{{ 
hostvars[groups.oo_first_master.0].openshift.master.registry_url }}" + when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master" + roles: + - role: openshift_hosted diff --git a/playbooks/common/openshift-glusterfs/roles b/playbooks/common/openshift-glusterfs/roles new file mode 120000 index 000000000..e2b799b9d --- /dev/null +++ b/playbooks/common/openshift-glusterfs/roles @@ -0,0 +1 @@ +../../../roles/
\ No newline at end of file diff --git a/playbooks/common/openshift-loadbalancer/config.yml b/playbooks/common/openshift-loadbalancer/config.yml index c414913bf..09ed81a83 100644 --- a/playbooks/common/openshift-loadbalancer/config.yml +++ b/playbooks/common/openshift-loadbalancer/config.yml @@ -12,5 +12,7 @@ openshift_use_nuage | default(false), nuage_mon_rest_server_port | default(none))) + openshift_loadbalancer_additional_backends | default([]) }}" + openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}" roles: + - role: os_firewall - role: openshift_loadbalancer diff --git a/playbooks/common/openshift-loadbalancer/service.yml b/playbooks/common/openshift-loadbalancer/service.yml deleted file mode 100644 index d3762c961..000000000 --- a/playbooks/common/openshift-loadbalancer/service.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -- name: Populate g_service_nodes host group if needed - hosts: localhost - connection: local - become: no - gather_facts: no - tasks: - - fail: msg="new_cluster_state is required to be injected in this playbook" - when: new_cluster_state is not defined - - - name: Evaluate g_service_lb - add_host: - name: "{{ item }}" - groups: g_service_lb - with_items: "{{ oo_host_group_exp | default([]) }}" - changed_when: False - -- name: Change state on lb instance(s) - hosts: g_service_lb - connection: ssh - gather_facts: no - tasks: - - service: name=haproxy state="{{ new_cluster_state }}" diff --git a/playbooks/common/openshift-cluster/additional_config.yml b/playbooks/common/openshift-master/additional_config.yml index c0ea93d2c..c0ea93d2c 100644 --- a/playbooks/common/openshift-cluster/additional_config.yml +++ b/playbooks/common/openshift-master/additional_config.yml diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index 7a334e771..b29b9ef4f 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -1,10 +1,31 @@ --- +- name: Disable excluders + hosts: oo_masters_to_config + gather_facts: no + roles: + - role: openshift_excluder + r_openshift_excluder_action: disable + r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" + - name: Gather and set facts for master hosts hosts: oo_masters_to_config vars: t_oo_option_master_debug_level: "{{ lookup('oo_option', 'openshift_master_debug_level') }}" pre_tasks: + # Per https://bugzilla.redhat.com/show_bug.cgi?id=1469336 + # + # When scaling up a cluster upgraded from OCP <= 3.5, ensure that + # OPENSHIFT_DEFAULT_REGISTRY is present as defined on the existing + # masters, or absent if such is the case. 
+ - name: Detect if this host is a new master in a scale up + set_fact: + g_openshift_master_is_scaleup: "{{ openshift.common.hostname in ( groups['new_masters'] | default([]) ) }}" + + - name: Scaleup Detection + debug: + var: g_openshift_master_is_scaleup + - name: Check for RPM generated config marker file .config_managed stat: path: /etc/origin/.config_managed @@ -48,12 +69,6 @@ - set_fact: openshift_hosted_metrics_resolution: "{{ lookup('oo_option', 'openshift_hosted_metrics_resolution') | default('10s', true) }}" when: openshift_hosted_metrics_resolution is not defined - - set_fact: - openshift_hosted_metrics_deployer_prefix: "{{ lookup('oo_option', 'openshift_hosted_metrics_deployer_prefix') | default('openshift') }}" - when: openshift_hosted_metrics_deployer_prefix is not defined - - set_fact: - openshift_hosted_metrics_deployer_version: "{{ lookup('oo_option', 'openshift_hosted_metrics_deployer_version') | default('latest') }}" - when: openshift_hosted_metrics_deployer_version is not defined roles: - openshift_facts post_tasks: @@ -75,7 +90,7 @@ ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}" master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}" -- name: Determine if session secrets must be generated +- name: Inspect state of first master config settings hosts: oo_first_master roles: - role: openshift_facts @@ -85,6 +100,60 @@ local_facts: session_auth_secrets: "{{ openshift_master_session_auth_secrets | default(openshift.master.session_auth_secrets | default(None)) }}" session_encryption_secrets: "{{ openshift_master_session_encryption_secrets | default(openshift.master.session_encryption_secrets | default(None)) }}" + - name: Check for existing configuration + stat: + path: /etc/origin/master/master-config.yaml + register: master_config_stat + + - name: Set clean install fact + set_fact: + l_clean_install: "{{ not master_config_stat.stat.exists | bool }}" + + - name: Determine if etcd3 storage is in use + command: grep -Pzo "storage-backend:\n.*etcd3" /etc/origin/master/master-config.yaml -q + register: etcd3_grep + failed_when: false + changed_when: false + + - name: Set etcd3 fact + set_fact: + l_etcd3_enabled: "{{ etcd3_grep.rc == 0 | bool }}" + + - name: Check if atomic-openshift-master sysconfig exists yet + stat: + path: /etc/sysconfig/atomic-openshift-master + register: l_aom_exists + + - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master parameter if present + command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master + register: l_default_registry_defined + when: l_aom_exists.stat.exists | bool + + - name: Check if atomic-openshift-master-api sysconfig exists yet + stat: + path: /etc/sysconfig/atomic-openshift-master-api + register: l_aom_api_exists + + - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master-api parameter if present + command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master-api + register: l_default_registry_defined_api + when: l_aom_api_exists.stat.exists | bool + + - name: Check if atomic-openshift-master-controllers sysconfig exists yet + stat: + path: /etc/sysconfig/atomic-openshift-master-controllers + register: l_aom_controllers_exists + + - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master-controllers parameter if present + command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master-controllers + register: l_default_registry_defined_controllers + when: l_aom_controllers_exists.stat.exists | bool + + - name: Update facts with 
OPENSHIFT_DEFAULT_REGISTRY value + set_fact: + l_default_registry_value: "{{ l_default_registry_defined.stdout | default('') }}" + l_default_registry_value_api: "{{ l_default_registry_defined_api.stdout | default('') }}" + l_default_registry_value_controllers: "{{ l_default_registry_defined_controllers.stdout | default('') }}" - name: Generate master session secrets hosts: oo_first_master @@ -115,7 +184,11 @@ | union(groups['oo_etcd_to_config'] | default([]))) | oo_collect('openshift.common.hostname') | default([]) | join (',') }}" + openshift_no_proxy_etcd_host_ips: "{{ hostvars | oo_select_keys(groups['oo_etcd_to_config'] | default([])) + | oo_collect('openshift.common.ip') | default([]) | join(',') + }}" roles: + - role: os_firewall - role: openshift_master openshift_ca_host: "{{ groups.oo_first_master.0 }}" openshift_master_etcd_hosts: "{{ hostvars @@ -123,12 +196,34 @@ | oo_collect('openshift.common.hostname') | default(none, true) }}" openshift_master_hosts: "{{ groups.oo_masters_to_config }}" + r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}" etcd_cert_config_dir: "{{ openshift.common.config_base }}/master" etcd_cert_prefix: "master.etcd-" + r_openshift_master_clean_install: "{{ hostvars[groups.oo_first_master.0].l_clean_install }}" + r_openshift_master_etcd3_storage: "{{ hostvars[groups.oo_first_master.0].l_etcd3_enabled }}" + openshift_master_is_scaleup_host: "{{ g_openshift_master_is_scaleup | default(false) }}" + openshift_master_default_registry_value: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value }}" + openshift_master_default_registry_value_api: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_api }}" + openshift_master_default_registry_value_controllers: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_controllers }}" + - role: nuage_master + when: openshift.common.use_nuage | bool + - role: calico_master + when: openshift.common.use_calico | bool post_tasks: - name: Create group for deployment type group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }} changed_when: False + +- include: additional_config.yml + when: not g_openshift_master_is_scaleup + +- name: Re-enable excluder if it was previously enabled + hosts: oo_masters_to_config + gather_facts: no + roles: + - role: openshift_excluder + r_openshift_excluder_action: enable + r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml index b35368bf1..6fec346c3 100644 --- a/playbooks/common/openshift-master/restart.yml +++ b/playbooks/common/openshift-master/restart.yml @@ -1,5 +1,5 @@ --- -- include: ../../common/openshift-master/validate_restart.yml +- include: validate_restart.yml - name: Restart masters hosts: oo_masters_to_config @@ -12,8 +12,8 @@ roles: - openshift_facts post_tasks: - - include: ../../common/openshift-master/restart_hosts.yml + - include: restart_hosts.yml when: openshift_rolling_restart_mode | default('services') == 'system' - - include: ../../common/openshift-master/restart_services.yml + - include: restart_services.yml when: openshift_rolling_restart_mode | default('services') == 'services' diff --git a/playbooks/common/openshift-master/restart_hosts.yml b/playbooks/common/openshift-master/restart_hosts.yml index 67ba0aa2e..a5dbe0590 100644 --- 
a/playbooks/common/openshift-master/restart_hosts.yml +++ b/playbooks/common/openshift-master/restart_hosts.yml @@ -37,3 +37,4 @@ state: started delay: 10 port: "{{ openshift.master.api_port }}" + timeout: 600 diff --git a/playbooks/common/openshift-master/restart_services.yml b/playbooks/common/openshift-master/restart_services.yml index 508b5a3ac..4f8b758fd 100644 --- a/playbooks/common/openshift-master/restart_services.yml +++ b/playbooks/common/openshift-master/restart_services.yml @@ -1,9 +1,4 @@ --- -- name: Restart master - service: - name: "{{ openshift.common.service_type }}-master" - state: restarted - when: not openshift_master_ha | bool - name: Restart master API service: name: "{{ openshift.common.service_type }}-master-api" @@ -15,6 +10,7 @@ state: started delay: 10 port: "{{ openshift.master.api_port }}" + timeout: 600 when: openshift_master_ha | bool - name: Restart master controllers service: diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml index 18e5c665f..17f9ef4bc 100644 --- a/playbooks/common/openshift-master/scaleup.yml +++ b/playbooks/common/openshift-master/scaleup.yml @@ -1,11 +1,4 @@ --- -- include: ../openshift-cluster/evaluate_groups.yml - -- name: Gather facts - hosts: oo_etcd_to_config:oo_masters_to_config:oo_nodes_to_config - roles: - - openshift_facts - - name: Update master count hosts: oo_masters:!oo_masters_to_config serial: 1 @@ -50,16 +43,6 @@ delay: 1 changed_when: false -- name: Configure docker hosts - hosts: oo_masters_to-config:oo_nodes_to_config - vars: - docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') | oo_split }}" - docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') | oo_split }}" - docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') | oo_split }}" - roles: - - openshift_facts - - openshift_docker - - include: ../openshift-master/config.yml - include: ../openshift-loadbalancer/config.yml diff --git a/playbooks/common/openshift-master/service.yml b/playbooks/common/openshift-master/service.yml deleted file mode 100644 index 48a2731aa..000000000 --- a/playbooks/common/openshift-master/service.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -- name: Populate g_service_masters host group if needed - hosts: localhost - gather_facts: no - connection: local - become: no - tasks: - - fail: msg="new_cluster_state is required to be injected in this playbook" - when: new_cluster_state is not defined - - - name: Evaluate g_service_masters - add_host: - name: "{{ item }}" - groups: g_service_masters - with_items: "{{ oo_host_group_exp | default([]) }}" - changed_when: False - -- name: Change state on master instance(s) - hosts: g_service_masters - connection: ssh - gather_facts: no - tasks: - - service: name={{ openshift.common.service_type }}-master state="{{ new_cluster_state }}" diff --git a/playbooks/common/openshift-nfs/service.yml b/playbooks/common/openshift-nfs/service.yml deleted file mode 100644 index b1e35e4b1..000000000 --- a/playbooks/common/openshift-nfs/service.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- -- name: Populate g_service_nfs host group if needed - hosts: localhost - gather_facts: no - tasks: - - fail: msg="new_cluster_state is required to be injected in this playbook" - when: new_cluster_state is not defined - - - name: Evaluate g_service_nfs - add_host: - name: "{{ item }}" - groups: g_service_nfs - with_items: "{{ oo_host_group_exp | default([]) }}" - changed_when: False - -- name: Change 
state on nfs instance(s) - hosts: g_service_nfs - connection: ssh - gather_facts: no - tasks: - - service: name=nfs-server state="{{ new_cluster_state }}" diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml index 6c5a299c1..c13417714 100644 --- a/playbooks/common/openshift-node/config.yml +++ b/playbooks/common/openshift-node/config.yml @@ -1,24 +1,11 @@ --- -- name: Gather and set facts for node hosts +- name: Disable excluders hosts: oo_nodes_to_config - vars: - t_oo_option_node_debug_level: "{{ lookup('oo_option', 'openshift_node_debug_level') }}" - pre_tasks: - - set_fact: - openshift_node_debug_level: "{{ t_oo_option_node_debug_level }}" - when: openshift_node_debug_level is not defined and t_oo_option_node_debug_level != "" + gather_facts: no roles: - - openshift_facts - tasks: - # Since the master is generating the node certificates before they are - # configured, we need to make sure to set the node properties beforehand if - # we do not want the defaults - - openshift_facts: - role: node - local_facts: - labels: "{{ openshift_node_labels | default(None) }}" - annotations: "{{ openshift_node_annotations | default(None) }}" - schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}" + - role: openshift_excluder + r_openshift_excluder_action: disable + r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" - name: Evaluate node groups hosts: localhost @@ -32,7 +19,11 @@ ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" ansible_become: "{{ g_sudo | default(omit) }}" with_items: "{{ groups.oo_nodes_to_config | default([]) }}" - when: hostvars[item].openshift.common is defined and hostvars[item].openshift.common.is_containerized | bool and (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config) + when: + - hostvars[item].openshift is defined + - hostvars[item].openshift.common is defined + - hostvars[item].openshift.common.is_containerized | bool + - (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config) changed_when: False - name: Configure containerized nodes @@ -47,9 +38,9 @@ | union(groups['oo_etcd_to_config'] | default([]))) | oo_collect('openshift.common.hostname') | default([]) | join (',') }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" + roles: + - role: os_firewall - role: openshift_node openshift_ca_host: "{{ groups.oo_first_master.0 }}" @@ -64,9 +55,8 @@ | union(groups['oo_etcd_to_config'] | default([]))) | oo_collect('openshift.common.hostname') | default([]) | join (',') }}" - when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and - openshift_generate_no_proxy_hosts | default(True) | bool }}" roles: + - role: os_firewall - role: openshift_node openshift_ca_host: "{{ groups.oo_first_master.0 }}" @@ -82,6 +72,8 @@ etcd_cert_subdir: "openshift-node-{{ openshift.common.hostname }}" etcd_cert_config_dir: "{{ openshift.common.config_base }}/node" when: openshift.common.use_flannel | bool + - role: calico + when: openshift.common.use_calico | bool - role: nuage_node when: openshift.common.use_nuage | bool - role: contiv @@ -94,3 +86,11 @@ - name: Create group for deployment type group_by: key=oo_nodes_deployment_type_{{ openshift.common.deployment_type }} changed_when: False + +- name: Re-enable excluder if it was previously enabled + hosts: oo_nodes_to_config + gather_facts: no + roles: + - role: 
openshift_excluder + r_openshift_excluder_action: enable + r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" diff --git a/playbooks/common/openshift-node/network_manager.yml b/playbooks/common/openshift-node/network_manager.yml new file mode 100644 index 000000000..b3a7399dc --- /dev/null +++ b/playbooks/common/openshift-node/network_manager.yml @@ -0,0 +1,28 @@ +--- +- include: ../openshift-cluster/evaluate_groups.yml + +- name: Install and configure NetworkManager + hosts: oo_all_hosts + become: yes + tasks: + - name: install NetworkManager + package: + name: 'NetworkManager' + state: present + + - name: configure NetworkManager + lineinfile: + dest: "/etc/sysconfig/network-scripts/ifcfg-{{ ansible_default_ipv4['interface'] }}" + regexp: '^{{ item }}=' + line: '{{ item }}=yes' + state: present + create: yes + with_items: + - 'USE_PEERDNS' + - 'NM_CONTROLLED' + + - name: enable and start NetworkManager + service: + name: 'NetworkManager' + state: started + enabled: yes diff --git a/playbooks/common/openshift-node/restart.yml b/playbooks/common/openshift-node/restart.yml index 441b100e9..c3beb59b7 100644 --- a/playbooks/common/openshift-node/restart.yml +++ b/playbooks/common/openshift-node/restart.yml @@ -11,6 +11,10 @@ service: name: docker state: restarted + register: l_docker_restart_docker_in_node_result + until: not l_docker_restart_docker_in_node_result | failed + retries: 3 + delay: 30 - name: Update docker facts openshift_facts: @@ -23,7 +27,6 @@ with_items: - etcd_container - openvswitch - - "{{ openshift.common.service_type }}-master" - "{{ openshift.common.service_type }}-master-api" - "{{ openshift.common.service_type }}-master-controllers" - "{{ openshift.common.service_type }}-node" @@ -36,6 +39,7 @@ state: started delay: 10 port: "{{ openshift.master.api_port }}" + timeout: 600 when: inventory_hostname in groups.oo_masters_to_config - name: restart node @@ -51,7 +55,7 @@ register: node_output delegate_to: "{{ groups.oo_first_master.0 }}" when: inventory_hostname in groups.oo_nodes_to_config - until: node_output.results.results[0].status.conditions | selectattr('type', 'match', '^Ready$') | map(attribute='status') | join | bool == True + until: node_output.results.returncode == 0 and node_output.results.results[0].status.conditions | selectattr('type', 'match', '^Ready$') | map(attribute='status') | join | bool == True # Give the node two minutes to come back online. 
retries: 24 delay: 5 diff --git a/playbooks/common/openshift-node/scaleup.yml b/playbooks/common/openshift-node/scaleup.yml deleted file mode 100644 index bb3b1e780..000000000 --- a/playbooks/common/openshift-node/scaleup.yml +++ /dev/null @@ -1,30 +0,0 @@ ---- -- include: ../openshift-cluster/evaluate_groups.yml - -- name: Gather facts - hosts: oo_etcd_to_config:oo_masters_to_config:oo_nodes_to_config - roles: - - openshift_facts - -- name: Gather and set facts for first master - hosts: oo_first_master - vars: - openshift_master_count: "{{ groups.oo_masters | length }}" - pre_tasks: - - set_fact: - openshift_master_default_subdomain: "{{ lookup('oo_option', 'openshift_master_default_subdomain') | default(None, true) }}" - when: openshift_master_default_subdomain is not defined - roles: - - openshift_master_facts - -- name: Configure docker hosts - hosts: oo_nodes_to_config - vars: - docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') | oo_split }}" - docker_insecure_registries: "{{ lookup('oo_option', 'docker_insecure_registries') | oo_split }}" - docker_blocked_registries: "{{ lookup('oo_option', 'docker_blocked_registries') | oo_split }}" - roles: - - openshift_facts - - openshift_docker - -- include: ../openshift-node/config.yml diff --git a/playbooks/common/openshift-node/service.yml b/playbooks/common/openshift-node/service.yml deleted file mode 100644 index 130a5416f..000000000 --- a/playbooks/common/openshift-node/service.yml +++ /dev/null @@ -1,26 +0,0 @@ ---- -- name: Populate g_service_nodes host group if needed - hosts: localhost - connection: local - become: no - gather_facts: no - tasks: - - fail: msg="new_cluster_state is required to be injected in this playbook" - when: new_cluster_state is not defined - - - name: Evaluate g_service_nodes - add_host: - name: "{{ item }}" - groups: g_service_nodes - with_items: "{{ oo_host_group_exp | default([]) }}" - changed_when: False - -- name: Change state on node instance(s) - hosts: g_service_nodes - connection: ssh - gather_facts: no - tasks: - - name: Change state on node instance(s) - service: - name: "{{ service_type }}-node" - state: "{{ new_cluster_state }}" diff --git a/playbooks/gce/README.md b/playbooks/gce/README.md deleted file mode 100644 index 0514d6f50..000000000 --- a/playbooks/gce/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# GCE playbooks - -This playbook directory is meant to be driven by [`bin/cluster`](../../bin), -which is community supported and most use is considered deprecated. 
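Editor's note: the openshift-node restart change above wraps the docker restart in a register/until/retries loop so that a transient failure no longer aborts the whole play. A minimal, standalone sketch of that idiom, under assumed values (the "nodes" group name is hypothetical; the until/retries syntax mirrors what the patch itself uses):

---
# Sketch only: retry a flaky service restart instead of failing the play
# on the first error, mirroring the pattern added to
# playbooks/common/openshift-node/restart.yml above.
- hosts: nodes          # hypothetical inventory group
  become: yes
  tasks:
    - name: Restart docker, tolerating transient failures
      service:
        name: docker
        state: restarted
      register: l_docker_restart_result
      until: not l_docker_restart_result | failed
      retries: 3
      delay: 30

The same shape (register the result, then loop with until/retries/delay) is what the patch applies to the node readiness check via the first master as well.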
diff --git a/playbooks/gce/openshift-cluster/add_nodes.yml b/playbooks/gce/openshift-cluster/add_nodes.yml deleted file mode 100644 index 765e03fdc..000000000 --- a/playbooks/gce/openshift-cluster/add_nodes.yml +++ /dev/null @@ -1,43 +0,0 @@ ---- -- name: Launch instance(s) - hosts: localhost - connection: local - become: no - gather_facts: no - vars_files: - - vars.yml - vars: - oo_extend_env: True - tasks: - - fail: - msg: Deployment type not supported for gce provider yet - when: deployment_type == 'enterprise' - - - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml - vars: - type: "compute" - count: "{{ num_nodes }}" - - include: tasks/launch_instances.yml - vars: - instances: "{{ node_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - g_sub_host_type: "{{ sub_host_type }}" - gce_machine_type: "{{ lookup('env', 'gce_machine_node_type') | default(lookup('env', 'gce_machine_type'), true) }}" - gce_machine_image: "{{ lookup('env', 'gce_machine_node_image') | default(lookup('env', 'gce_machine_image'), true) }}" - - - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml - vars: - type: "infra" - count: "{{ num_infra }}" - - include: tasks/launch_instances.yml - vars: - instances: "{{ node_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - g_sub_host_type: "{{ sub_host_type }}" - gce_machine_type: "{{ lookup('env', 'gce_machine_node_type') | default(lookup('env', 'gce_machine_type'), true) }}" - gce_machine_image: "{{ lookup('env', 'gce_machine_node_image') | default(lookup('env', 'gce_machine_image'), true) }}" - -- include: scaleup.yml -- include: list.yml diff --git a/playbooks/gce/openshift-cluster/cluster_hosts.yml b/playbooks/gce/openshift-cluster/cluster_hosts.yml deleted file mode 100644 index 74e2420db..000000000 --- a/playbooks/gce/openshift-cluster/cluster_hosts.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- -g_all_hosts: "{{ groups['tag_clusterid-' ~ cluster_id] | default([]) - | intersect(groups['tag_environment-' ~ cluster_env] | default([])) }}" - -g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-etcd'] | default([])) }}" - -g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-lb'] | default([])) }}" - -g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-nfs'] | default([])) }}" - -g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-master'] | default([])) }}" - -g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-master'] | default([])) }}" - -g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-node'] | default([])) }}" - -g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-node'] | default([])) }}" - -g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra'] | default([])) }}" - -g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-compute'] | default([])) }}" diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml deleted file mode 100644 index 8e46c5919..000000000 --- a/playbooks/gce/openshift-cluster/config.yml +++ /dev/null @@ -1,35 +0,0 @@ ---- -- hosts: localhost - gather_facts: no - tasks: - - include_vars: vars.yml - - include_vars: cluster_hosts.yml - - add_host: - name: "{{ item }}" - groups: l_oo_all_hosts - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: "{{ g_all_hosts | default([]) }}" - -- hosts: l_oo_all_hosts - 
gather_facts: no - tasks: - - include_vars: vars.yml - - include_vars: cluster_hosts.yml - -- include: ../../common/openshift-cluster/config.yml - vars: - g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - g_sudo: "{{ deployment_vars[deployment_type].become }}" - g_nodeonmaster: true - openshift_cluster_id: "{{ cluster_id }}" - openshift_debug_level: "{{ debug_level }}" - openshift_deployment_type: "{{ deployment_type }}" - openshift_hostname: "{{ gce_private_ip }}" - openshift_hosted_registry_selector: 'type=infra' - openshift_hosted_router_selector: 'type=infra' - openshift_master_cluster_method: 'native' - openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}" - os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}" - openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}" - openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}" diff --git a/playbooks/gce/openshift-cluster/launch.yml b/playbooks/gce/openshift-cluster/launch.yml deleted file mode 100644 index 7532a678b..000000000 --- a/playbooks/gce/openshift-cluster/launch.yml +++ /dev/null @@ -1,67 +0,0 @@ ---- -- name: Launch instance(s) - hosts: localhost - connection: local - become: no - gather_facts: no - vars_files: - - vars.yml - tasks: - - fail: msg="Deployment type not supported for gce provider yet" - when: deployment_type == 'enterprise' - - - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml - - include: tasks/launch_instances.yml - vars: - instances: "{{ etcd_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - g_sub_host_type: "default" - gce_machine_type: "{{ lookup('env', 'gce_machine_etcd_type') | default(lookup('env', 'gce_machine_type'), true) }}" - gce_machine_image: "{{ lookup('env', 'gce_machine_etcd_image') | default(lookup('env', 'gce_machine_image'), true) }}" - - - - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml - - include: tasks/launch_instances.yml - vars: - instances: "{{ master_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - g_sub_host_type: "default" - gce_machine_type: "{{ lookup('env', 'gce_machine_master_type') | default(lookup('env', 'gce_machine_type'), true) }}" - gce_machine_image: "{{ lookup('env', 'gce_machine_master_image') | default(lookup('env', 'gce_machine_image'), true) }}" - - - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml - vars: - type: "compute" - count: "{{ num_nodes }}" - - include: tasks/launch_instances.yml - vars: - instances: "{{ node_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - g_sub_host_type: "{{ sub_host_type }}" - gce_machine_type: "{{ lookup('env', 'gce_machine_node_type') | default(lookup('env', 'gce_machine_type'), true) }}" - gce_machine_image: "{{ lookup('env', 'gce_machine_node_image') | default(lookup('env', 'gce_machine_image'), true) }}" - - - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml - vars: - type: "infra" - count: "{{ num_infra }}" - - include: tasks/launch_instances.yml - vars: - instances: "{{ node_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - g_sub_host_type: "{{ sub_host_type }}" - gce_machine_type: "{{ lookup('env', 'gce_machine_node_type') | default(lookup('env', 'gce_machine_type'), true) }}" - gce_machine_image: "{{ lookup('env', 'gce_machine_node_image') | default(lookup('env', 'gce_machine_image'), true) }}" - - - add_host: - name: "{{ master_names.0 }}" - groups: service_master - when: 
master_names is defined and master_names.0 is defined - -- include: update.yml - -- include: list.yml diff --git a/playbooks/gce/openshift-cluster/list.yml b/playbooks/gce/openshift-cluster/list.yml deleted file mode 100644 index 34ab09533..000000000 --- a/playbooks/gce/openshift-cluster/list.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -- name: Generate oo_list_hosts group - hosts: localhost - connection: local - become: no - gather_facts: no - vars_files: - - vars.yml - tasks: - - set_fact: scratch_group=tag_clusterid-{{ cluster_id }} - when: cluster_id != '' - - set_fact: scratch_group=all - when: cluster_id == '' - - add_host: - name: "{{ item }}" - groups: oo_list_hosts - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - oo_public_ipv4: "{{ hostvars[item].gce_public_ip }}" - oo_private_ipv4: "{{ hostvars[item].gce_private_ip }}" - with_items: "{{ groups[scratch_group] | default([], true) | difference(['localhost']) | difference(groups.status_terminated | default([], true)) }}" - - debug: - msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}" diff --git a/playbooks/gce/openshift-cluster/service.yml b/playbooks/gce/openshift-cluster/service.yml deleted file mode 100644 index 13b267976..000000000 --- a/playbooks/gce/openshift-cluster/service.yml +++ /dev/null @@ -1,29 +0,0 @@ ---- -- name: Call same systemctl command for openshift on all instance(s) - hosts: localhost - connection: local - become: no - gather_facts: no - vars_files: - - vars.yml - - cluster_hosts.yml - tasks: - - fail: msg="cluster_id is required to be injected in this playbook" - when: cluster_id is not defined - - - add_host: - name: "{{ item }}" - groups: g_service_nodes - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: "{{ node_hosts | default([]) | difference(['localhost']) | difference(groups.status_terminated) }}" - - - add_host: - name: "{{ item }}" - groups: g_service_masters - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: "{{ master_hosts | default([]) | difference(['localhost']) | difference(groups.status_terminated) }}" - -- include: ../../common/openshift-node/service.yml -- include: ../../common/openshift-master/service.yml diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml deleted file mode 100644 index 65dd2b71e..000000000 --- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml +++ /dev/null @@ -1,65 +0,0 @@ ---- -- name: Launch instance(s) - gce: - instance_names: "{{ instances|join(',') }}" - machine_type: "{{ gce_machine_type | default(deployment_vars[deployment_type].machine_type, true) }}" - image: "{{ gce_machine_image | default(deployment_vars[deployment_type].image, true) }}" - service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}" - pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}" - project_id: "{{ lookup('env', 'gce_project_id') }}" - zone: "{{ lookup('env', 'zone') }}" - network: "{{ lookup('env', 'network') }}" - subnetwork: "{{ lookup('env', 'subnetwork') | default(omit, True) }}" - # unsupported in 1.9.+ - #service_account_permissions: "datastore,logging-write" - tags: - - created-by-{{ lookup('env', 'LOGNAME') | 
regex_replace('[^a-z0-9]+', '') | default(cluster, true) }} - - environment-{{ cluster_env }} - - clusterid-{{ cluster_id }} - - host-type-{{ type }} - - sub-host-type-{{ g_sub_host_type }} - metadata: - startup-script: | - #!/bin/bash - echo "Defaults:{{ deployment_vars[deployment_type].ssh_user }} !requiretty" > /etc/sudoers.d/99-{{ deployment_vars[deployment_type].ssh_user }} - - when: instances |length > 0 - register: gce - -- set_fact: - node_label: - # There doesn't seem to be a way to get the region directly, so parse it out of the zone. - region: "{{ gce.zone | regex_replace('^(.*)-.*$', '\\\\1') }}" - type: "{{ g_sub_host_type }}" - when: instances |length > 0 and type == "node" - -- set_fact: - node_label: - # There doesn't seem to be a way to get the region directly, so parse it out of the zone. - region: "{{ gce.zone | regex_replace('^(.*)-.*$', '\\\\1') }}" - type: "{{ type }}" - when: instances |length > 0 and type != "node" - -- name: Add new instances to groups and set variables needed - add_host: - hostname: "{{ item.name }}" - ansible_ssh_host: "{{ item.public_ip }}" - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - groups: "{{ item.tags | oo_prepend_strings_in_list('tag_') | join(',') }}" - gce_public_ip: "{{ item.public_ip }}" - gce_private_ip: "{{ item.private_ip }}" - openshift_node_labels: "{{ node_label }}" - with_items: "{{ gce.instance_data | default([], true) }}" - -- name: Wait for ssh - wait_for: port=22 host={{ item.public_ip }} - with_items: "{{ gce.instance_data | default([], true) }}" - -- name: Wait for user setup - command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.name].ansible_ssh_user }}@{{ item.public_ip }} echo {{ hostvars[item.name].ansible_ssh_user }} user is setup" - register: result - until: result.rc == 0 - retries: 30 - delay: 5 - with_items: "{{ gce.instance_data | default([], true) }}" diff --git a/playbooks/gce/openshift-cluster/terminate.yml b/playbooks/gce/openshift-cluster/terminate.yml deleted file mode 100644 index afe269b7c..000000000 --- a/playbooks/gce/openshift-cluster/terminate.yml +++ /dev/null @@ -1,58 +0,0 @@ ---- -- name: Terminate instance(s) - hosts: localhost - connection: local - become: no - gather_facts: no - vars_files: - - vars.yml - tasks: - - add_host: - name: "{{ item }}" - groups: oo_hosts_to_terminate - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: "{{ (groups['tag_clusterid-' ~ cluster_id] | default([])) | difference(['localhost']) }}" - -- name: Unsubscribe VMs - hosts: oo_hosts_to_terminate - vars_files: - - vars.yml - roles: - - role: rhel_unsubscribe - when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and - ansible_distribution == "RedHat" and - lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) | - default('no', True) | lower in ['no', 'false'] - -- name: Terminate instances(s) - hosts: localhost - become: no - connection: local - gather_facts: no - vars_files: - - vars.yml - tasks: - - name: Terminate instances that were previously launched - local_action: - module: gce - state: 'absent' - name: "{{ item }}" - service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}" - pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}" - project_id: 
"{{ lookup('env', 'gce_project_id') }}" - zone: "{{ lookup('env', 'zone') }}" - with_items: "{{ groups['oo_hosts_to_terminate'] | default([], true) }}" - when: item is defined - -#- include: ../openshift-node/terminate.yml -# vars: -# gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}" -# gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}" -# gce_project_id: "{{ lookup('env', 'gce_project_id') }}" -# -#- include: ../openshift-master/terminate.yml -# vars: -# gce_service_account_email: "{{ lookup('env', 'gce_service_account_email_address') }}" -# gce_pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}" -# gce_project_id: "{{ lookup('env', 'gce_project_id') }}" diff --git a/playbooks/gce/openshift-cluster/update.yml b/playbooks/gce/openshift-cluster/update.yml deleted file mode 100644 index 6d2af3d26..000000000 --- a/playbooks/gce/openshift-cluster/update.yml +++ /dev/null @@ -1,34 +0,0 @@ ---- -- hosts: localhost - gather_facts: no - tasks: - - include_vars: vars.yml - - include_vars: cluster_hosts.yml - - add_host: - name: "{{ item }}" - groups: l_oo_all_hosts - with_items: "{{ g_all_hosts }}" - -- hosts: l_oo_all_hosts - gather_facts: no - tasks: - - include_vars: vars.yml - - include_vars: cluster_hosts.yml - -- name: Populate oo_hosts_to_update group - hosts: localhost - connection: local - become: no - gather_facts: no - tasks: - - name: Evaluate oo_hosts_to_update - add_host: - name: "{{ item }}" - groups: oo_hosts_to_update - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: "{{ g_all_hosts | default([]) }}" - -- include: ../../common/openshift-cluster/update_repos_and_packages.yml - -- include: config.yml diff --git a/playbooks/gce/openshift-cluster/vars.yml b/playbooks/gce/openshift-cluster/vars.yml deleted file mode 100644 index 13c754c1e..000000000 --- a/playbooks/gce/openshift-cluster/vars.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -debug_level: 2 - -deployment_rhel7_ent_base: - image: "{{ lookup('oo_option', 'image_name') | default('rhel-7', True) }}" - machine_type: "{{ lookup('oo_option', 'machine_type') | default('n1-standard-1', True) }}" - ssh_user: "{{ lookup('env', 'gce_ssh_user') | default(ansible_ssh_user, true) }}" - become: yes - -deployment_vars: - origin: - image: "{{ lookup('oo_option', 'image_name') | default('centos-7', True) }}" - machine_type: "{{ lookup('oo_option', 'machine_type') | default('n1-standard-1', True) }}" - ssh_user: "{{ lookup('env', 'gce_ssh_user') | default(ansible_ssh_user, true) }}" - become: yes - enterprise: "{{ deployment_rhel7_ent_base }}" - openshift-enterprise: "{{ deployment_rhel7_ent_base }}" - atomic-enterprise: "{{ deployment_rhel7_ent_base }}" diff --git a/playbooks/libvirt/README.md b/playbooks/libvirt/README.md deleted file mode 100644 index 3ce46a76f..000000000 --- a/playbooks/libvirt/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# libvirt playbooks - -This playbook directory is meant to be driven by [`bin/cluster`](../../bin), -which is community supported and most use is considered deprecated. 
diff --git a/playbooks/libvirt/openshift-cluster/cluster_hosts.yml b/playbooks/libvirt/openshift-cluster/cluster_hosts.yml deleted file mode 100644 index 74e2420db..000000000 --- a/playbooks/libvirt/openshift-cluster/cluster_hosts.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- -g_all_hosts: "{{ groups['tag_clusterid-' ~ cluster_id] | default([]) - | intersect(groups['tag_environment-' ~ cluster_env] | default([])) }}" - -g_etcd_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-etcd'] | default([])) }}" - -g_lb_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-lb'] | default([])) }}" - -g_nfs_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-nfs'] | default([])) }}" - -g_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-master'] | default([])) }}" - -g_new_master_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-master'] | default([])) }}" - -g_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-node'] | default([])) }}" - -g_new_node_hosts: "{{ g_all_hosts | intersect(groups['tag_host-type-new-node'] | default([])) }}" - -g_infra_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-infra'] | default([])) }}" - -g_compute_hosts: "{{ g_node_hosts | intersect(groups['tag_sub-host-type-compute'] | default([])) }}" diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml deleted file mode 100644 index 44b0f5a3c..000000000 --- a/playbooks/libvirt/openshift-cluster/config.yml +++ /dev/null @@ -1,37 +0,0 @@ ---- -# TODO: need to figure out a plan for setting hostname, currently the default -# is localhost, so no hostname value (or public_hostname) value is getting -# assigned - -- hosts: localhost - gather_facts: no - tasks: - - include_vars: vars.yml - - include_vars: cluster_hosts.yml - - add_host: - name: "{{ item }}" - groups: l_oo_all_hosts - with_items: "{{ g_all_hosts | default([]) }}" - -- hosts: l_oo_all_hosts - gather_facts: no - tasks: - - include_vars: vars.yml - - include_vars: cluster_hosts.yml - -- include: ../../common/openshift-cluster/config.yml - vars: - g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - g_sudo: "{{ deployment_vars[deployment_type].become }}" - g_nodeonmaster: true - openshift_cluster_id: "{{ cluster_id }}" - openshift_debug_level: "{{ debug_level }}" - openshift_deployment_type: "{{ deployment_type }}" - openshift_hosted_registry_selector: 'type=infra' - openshift_hosted_router_selector: 'type=infra' - openshift_master_cluster_method: 'native' - openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}" - os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}" - openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}" - openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}" - openshift_use_dnsmasq: false diff --git a/playbooks/libvirt/openshift-cluster/launch.yml b/playbooks/libvirt/openshift-cluster/launch.yml deleted file mode 100644 index 2475b9d6b..000000000 --- a/playbooks/libvirt/openshift-cluster/launch.yml +++ /dev/null @@ -1,57 +0,0 @@ ---- -- name: Launch instance(s) - hosts: localhost - become: no - connection: local - gather_facts: no - vars_files: - - vars.yml - vars: - image_url: "{{ deployment_vars[deployment_type].image.url }}" - image_sha256: "{{ deployment_vars[deployment_type].image.sha256 }}" - image_name: "{{ deployment_vars[deployment_type].image.name }}" - image_compression: "{{ deployment_vars[deployment_type].image.compression }}" - tasks: - - 
include: tasks/configure_libvirt.yml - - - include: ../../common/openshift-cluster/tasks/set_etcd_launch_facts.yml - - include: tasks/launch_instances.yml - vars: - instances: "{{ etcd_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - g_sub_host_type: "default" - - - include: ../../common/openshift-cluster/tasks/set_master_launch_facts.yml - - include: tasks/launch_instances.yml - vars: - instances: "{{ master_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - g_sub_host_type: "default" - - - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml - vars: - type: "compute" - count: "{{ num_nodes }}" - - include: tasks/launch_instances.yml - vars: - instances: "{{ node_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - g_sub_host_type: "{{ sub_host_type }}" - - - include: ../../common/openshift-cluster/tasks/set_node_launch_facts.yml - vars: - type: "infra" - count: "{{ num_infra }}" - - include: tasks/launch_instances.yml - vars: - instances: "{{ node_names }}" - cluster: "{{ cluster_id }}" - type: "{{ k8s_type }}" - g_sub_host_type: "{{ sub_host_type }}" - -- include: update.yml - -- include: list.yml diff --git a/playbooks/libvirt/openshift-cluster/list.yml b/playbooks/libvirt/openshift-cluster/list.yml deleted file mode 100644 index 579cd7ac6..000000000 --- a/playbooks/libvirt/openshift-cluster/list.yml +++ /dev/null @@ -1,23 +0,0 @@ ---- -- name: Generate oo_list_hosts group - hosts: localhost - become: no - connection: local - gather_facts: no - vars_files: - - vars.yml - tasks: - - set_fact: scratch_group=tag_clusterid-{{ cluster_id }} - when: cluster_id != '' - - set_fact: scratch_group=all - when: cluster_id == '' - - add_host: - name: "{{ item }}" - groups: oo_list_hosts - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - oo_public_ipv4: "" - oo_private_ipv4: "{{ hostvars[item].libvirt_ip_address }}" - with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}" - - debug: - msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster }}" diff --git a/playbooks/libvirt/openshift-cluster/service.yml b/playbooks/libvirt/openshift-cluster/service.yml deleted file mode 100644 index 8bd24a8cf..000000000 --- a/playbooks/libvirt/openshift-cluster/service.yml +++ /dev/null @@ -1,34 +0,0 @@ ---- -# TODO: need to figure out a plan for setting hostname, currently the default -# is localhost, so no hostname value (or public_hostname) value is getting -# assigned - -- name: Call same systemctl command for openshift on all instance(s) - hosts: localhost - become: no - connection: local - gather_facts: no - vars_files: - - vars.yml - tasks: - - fail: msg="cluster_id is required to be injected in this playbook" - when: cluster_id is not defined - - - name: Evaluate g_service_masters - add_host: - name: "{{ item }}" - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - groups: g_service_masters - with_items: "{{ g_master_hosts | default([]) }}" - - - name: Evaluate g_service_nodes - add_host: - name: "{{ item }}" - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - groups: g_service_nodes - with_items: "{{ g_node_hosts | default([]) }}" - -- include: ../../common/openshift-node/service.yml -- include: 
../../common/openshift-master/service.yml diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml deleted file mode 100644 index f237c1a60..000000000 --- a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt.yml +++ /dev/null @@ -1,6 +0,0 @@ ---- -- include: configure_libvirt_storage_pool.yml - when: libvirt_storage_pool is defined and libvirt_storage_pool_path is defined - -- include: configure_libvirt_network.yml - when: libvirt_network is defined diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml deleted file mode 100644 index b42ca83af..000000000 --- a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml +++ /dev/null @@ -1,11 +0,0 @@ ---- -- name: Create the libvirt network for OpenShift - virt_net: - name: '{{ libvirt_network }}' - state: '{{ item }}' - autostart: 'yes' - xml: "{{ lookup('template', 'network.xml') }}" - uri: '{{ libvirt_uri }}' - with_items: - - present - - active diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml deleted file mode 100644 index 8685624ec..000000000 --- a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml +++ /dev/null @@ -1,30 +0,0 @@ ---- -- name: Create libvirt storage directory for openshift - file: - dest: "{{ libvirt_storage_pool_path }}" - state: directory - -# We need to set permissions on the directory and any items created under the directory, so we need to call the acl module with and without default set. -- acl: - default: '{{ item.default }}' - entity: kvm - etype: group - name: "{{ libvirt_storage_pool_path }}" - permissions: '{{ item.permissions }}' - state: present - with_items: - - default: no - permissions: x - - default: yes - permissions: rwx - -- name: Create the libvirt storage pool for OpenShift - virt_pool: - name: '{{ libvirt_storage_pool }}' - state: '{{ item }}' - autostart: 'yes' - xml: "{{ lookup('template', 'storage-pool.xml') }}" - uri: '{{ libvirt_uri }}' - with_items: - - present - - active diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml deleted file mode 100644 index 78581fdfe..000000000 --- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml +++ /dev/null @@ -1,138 +0,0 @@ ---- -# TODO: Add support for choosing base image based on deployment_type and os -# wanted (os wanted needs support added in bin/cluster with sane defaults: -# fedora/centos for origin, rhel for enterprise) - -# TODO: create a role to encapsulate some of this complexity, possibly also -# create a module to manage the storage tasks, network tasks, and possibly -# even handle the libvirt tasks to set metadata in the domain xml and be able -# to create/query data about vms without having to use xml the python libvirt -# bindings look like a good candidate for this - -- name: Download Base Cloud image - get_url: - url: '{{ image_url }}' - sha256sum: '{{ image_sha256 }}' - dest: '{{ libvirt_storage_pool_path }}/{{ [image_name, image_compression] | difference([""]) | join(".") }}' - when: '{{ ( lookup("oo_option", "skip_image_download") | default("no", True) | lower ) in ["false", "no"] }}' - register: downloaded_image - -- name: Uncompress xz compressed base cloud image - command: 'unxz 
-kf {{ libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}' - args: - creates: '{{ libvirt_storage_pool_path }}/{{ image_name }}' - when: image_compression in ["xz"] and downloaded_image.changed - -- name: Uncompress tgz compressed base cloud image - command: 'tar zxvf {{ libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}' - args: - creates: '{{ libvirt_storage_pool_path }}/{{ image_name }}' - when: image_compression in ["tgz"] and downloaded_image.changed - -- name: Uncompress gzip compressed base cloud image - command: 'gunzip {{ libvirt_storage_pool_path }}/{{ [image_name, image_compression] | join(".") }}' - args: - creates: '{{ libvirt_storage_pool_path }}/{{ image_name }}' - when: image_compression in ["gz"] and downloaded_image.changed - -- name: Create the cloud-init config drive path - file: - dest: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/' - state: directory - with_items: '{{ instances }}' - -- name: Create the cloud-init config drive files - template: - src: '{{ item[1] }}' - dest: '{{ libvirt_storage_pool_path }}/{{ item[0] }}_configdrive/{{ item[1] }}' - with_nested: - - '{{ instances }}' - - [ user-data, meta-data ] - -- name: Create the cloud-init config drive - command: 'genisoimage -output {{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso -volid cidata -joliet -rock user-data meta-data' - args: - chdir: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/' - creates: '{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso' - with_items: '{{ instances }}' - -- name: Refresh the libvirt storage pool for openshift - command: 'virsh -c {{ libvirt_uri }} pool-refresh {{ libvirt_storage_pool }}' - -- name: Create VM drives - command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ libvirt_storage_pool }} {{ item }}.qcow2 10G --format qcow2 --backing-vol {{ image_name }} --backing-vol-format qcow2' - with_items: '{{ instances }}' - -- name: Create VM docker drives - command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ libvirt_storage_pool }} {{ item }}-docker.qcow2 10G --format qcow2 --allocation 0' - with_items: '{{ instances }}' - -- name: Create VMs - virt: - name: '{{ item }}' - command: define - xml: "{{ lookup('template', '../templates/domain.xml') }}" - uri: '{{ libvirt_uri }}' - with_items: '{{ instances }}' - -- name: Start VMs - virt: - name: '{{ item }}' - state: running - uri: '{{ libvirt_uri }}' - with_items: '{{ instances }}' - -- name: Wait for the VMs to get an IP - shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases {{ libvirt_network }} | egrep -c ''{{ instances | join("|") }}''' - register: nb_allocated_ips - until: nb_allocated_ips.stdout == '{{ instances | length }}' - retries: 60 - delay: 3 - when: instances | length != 0 - -- name: Collect IP addresses of the VMs - shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases {{ libvirt_network }} | awk ''$6 == "{{ item }}" {gsub(/\/.*/, "", $5); print $5}''' - register: scratch_ip - with_items: '{{ instances }}' - -- set_fact: - ips: "{{ scratch_ip.results | default([]) | oo_collect('stdout') }}" - -- set_fact: - node_label: - type: "{{ g_sub_host_type }}" - when: instances | length > 0 and type == "node" - -- set_fact: - node_label: - type: "{{ type }}" - when: instances | length > 0 and type != "node" - -- name: Add new instances - add_host: - hostname: '{{ item.0 }}' - ansible_ssh_host: '{{ item.1 }}' - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become 
}}" - groups: "tag_environment-{{ cluster_env }}, tag_host-type-{{ type }}, tag_sub-host-type-{{ g_sub_host_type }}, tag_clusterid-{{ cluster_id }}" - openshift_node_labels: "{{ node_label }}" - libvirt_ip_address: "{{ item.1 }}" - with_together: - - '{{ instances }}' - - '{{ ips }}' - -- name: Wait for ssh - wait_for: - host: '{{ item }}' - port: 22 - with_items: '{{ ips }}' - -- name: Wait for openshift user setup - command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null openshift@{{ item.1 }} echo openshift user is setup' - register: result - until: result.rc == 0 - retries: 30 - delay: 1 - with_together: - - '{{ instances }}' - - '{{ ips }}' diff --git a/playbooks/libvirt/openshift-cluster/templates/domain.xml b/playbooks/libvirt/openshift-cluster/templates/domain.xml deleted file mode 100644 index 88504a5f6..000000000 --- a/playbooks/libvirt/openshift-cluster/templates/domain.xml +++ /dev/null @@ -1,65 +0,0 @@ -<domain type='kvm' id='8'> - <name>{{ item }}</name> - <memory unit='MiB'>{{ libvirt_instance_memory_mib }}</memory> - <metadata xmlns:ansible="https://github.com/ansible/ansible"> - <ansible:tags> - <ansible:tag>environment-{{ cluster_env }}</ansible:tag> - <ansible:tag>clusterid-{{ cluster }}</ansible:tag> - <ansible:tag>host-type-{{ type }}</ansible:tag> - <ansible:tag>sub-host-type-{{ g_sub_host_type }}</ansible:tag> - </ansible:tags> - </metadata> - <vcpu placement='static'>{{ libvirt_instance_vcpu }}</vcpu> - <os> - <type arch='x86_64' machine='pc'>hvm</type> - <boot dev='hd'/> - </os> - <features> - <acpi/> - <apic/> - <pae/> - </features> - <cpu mode='host-model'> - <model fallback='allow'/> - </cpu> - <clock offset='utc'> - <timer name='rtc' tickpolicy='catchup'/> - <timer name='pit' tickpolicy='delay'/> - <timer name='hpet' present='no'/> - </clock> - <on_poweroff>destroy</on_poweroff> - <on_reboot>restart</on_reboot> - <on_crash>restart</on_crash> - <devices> - <emulator>/usr/bin/qemu-system-x86_64</emulator> - <disk type='file' device='disk'> - <driver name='qemu' type='qcow2' discard='unmap'/> - <source file='{{ libvirt_storage_pool_path }}/{{ item }}.qcow2'/> - <target dev='sda' bus='scsi'/> - </disk> - <disk type='file' device='disk'> - <driver name='qemu' type='qcow2' discard='unmap'/> - <source file='{{ libvirt_storage_pool_path }}/{{ item }}-docker.qcow2'/> - <target dev='sdb' bus='scsi'/> - </disk> - <disk type='file' device='cdrom'> - <driver name='qemu' type='raw'/> - <source file='{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'/> - <target dev='sdc' bus='scsi'/> - <readonly/> - </disk> - <controller type='scsi' model='virtio-scsi' /> - <interface type='network'> - <source network='{{ libvirt_network }}'/> - <model type='virtio'/> - </interface> - <serial type='pty'> - <target port='0'/> - </serial> - <console type='pty'> - <target type='serial' port='0'/> - </console> - <memballoon model='virtio'> - </memballoon> - </devices> -</domain> diff --git a/playbooks/libvirt/openshift-cluster/templates/meta-data b/playbooks/libvirt/openshift-cluster/templates/meta-data deleted file mode 100644 index 6b421770d..000000000 --- a/playbooks/libvirt/openshift-cluster/templates/meta-data +++ /dev/null @@ -1,3 +0,0 @@ -instance-id: {{ item[0] }} -hostname: {{ item[0] }} -local-hostname: {{ item[0] }}.example.com diff --git a/playbooks/libvirt/openshift-cluster/templates/network.xml b/playbooks/libvirt/openshift-cluster/templates/network.xml deleted file mode 100644 index 0ce2a8342..000000000 
--- a/playbooks/libvirt/openshift-cluster/templates/network.xml +++ /dev/null @@ -1,23 +0,0 @@ -<network> - <name>{{ libvirt_network }}</name> - <forward mode='nat'> - <nat> - <port start='1024' end='65535'/> - </nat> - </forward> - <!-- TODO: query for first available virbr interface available --> - <bridge name='virbr3' stp='on' delay='0'/> - <!-- TODO: make overridable --> - <domain name='example.com' localOnly='yes' /> - <dns> - <!-- TODO: automatically add host entries --> - </dns> - <!-- TODO: query for available address space --> - <ip address='192.168.55.1' netmask='255.255.255.0'> - <dhcp> - <range start='192.168.55.2' end='192.168.55.254'/> - <!-- TODO: add static entries addresses for the hosts to be created --> - </dhcp> - </ip> -</network> - diff --git a/playbooks/libvirt/openshift-cluster/templates/storage-pool.xml b/playbooks/libvirt/openshift-cluster/templates/storage-pool.xml deleted file mode 100644 index da139afd0..000000000 --- a/playbooks/libvirt/openshift-cluster/templates/storage-pool.xml +++ /dev/null @@ -1,6 +0,0 @@ -<pool type='dir'> - <name>{{ libvirt_storage_pool }}</name> - <target> - <path>{{ libvirt_storage_pool_path }}</path> - </target> -</pool> diff --git a/playbooks/libvirt/openshift-cluster/templates/user-data b/playbooks/libvirt/openshift-cluster/templates/user-data deleted file mode 100644 index fbcf7c886..000000000 --- a/playbooks/libvirt/openshift-cluster/templates/user-data +++ /dev/null @@ -1,43 +0,0 @@ -#cloud-config -disable_root: true - -hostname: {{ item[0] }} -fqdn: {{ item[0] }}.example.com - -mounts: -- [ sdb ] - -users: - - default - - name: root - ssh_authorized_keys: - - {{ lookup('file', '~/.ssh/id_rsa.pub') }} - -system_info: - default_user: - name: openshift - sudo: ["ALL=(ALL) NOPASSWD: ALL"] - -ssh_authorized_keys: - - {{ lookup('file', '~/.ssh/id_rsa.pub') }} - -write_files: - - path: /etc/sudoers.d/00-openshift-no-requiretty - permissions: 440 - content: | - Defaults:openshift !requiretty - - path: /etc/sysconfig/docker-storage-setup - owner: root:root - permissions: '0644' - content: | - DEVS=/dev/sdb - VG=docker_vg - EXTRA_DOCKER_STORAGE_OPTIONS='--storage-opt dm.blkdiscard=true' - - path: /etc/systemd/system/fstrim.timer.d/hourly.conf - content: | - [Timer] - OnCalendar=hourly - -runcmd: - - NETWORK_CONFIG=/etc/sysconfig/network-scripts/ifcfg-eth0; if ! 
grep DHCP_HOSTNAME ${NETWORK_CONFIG}; then echo 'DHCP_HOSTNAME="{{ item[0] }}.example.com"' >> ${NETWORK_CONFIG}; fi; pkill -9 dhclient; service network restart - - systemctl enable --now fstrim.timer diff --git a/playbooks/libvirt/openshift-cluster/terminate.yml b/playbooks/libvirt/openshift-cluster/terminate.yml deleted file mode 100644 index 8a63d11a5..000000000 --- a/playbooks/libvirt/openshift-cluster/terminate.yml +++ /dev/null @@ -1,70 +0,0 @@ ---- -# TODO: does not handle a non-existent cluster gracefully - -- name: Terminate instance(s) - hosts: localhost - become: no - connection: local - gather_facts: no - vars_files: - - vars.yml - tasks: - - set_fact: cluster_group=tag_clusterid-{{ cluster_id }} - - add_host: - name: "{{ item }}" - groups: oo_hosts_to_terminate - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: '{{ groups[cluster_group] | default([]) }}' - -- name: Unsubscribe VMs - hosts: oo_hosts_to_terminate - vars_files: - - vars.yml - roles: - - role: rhel_unsubscribe - when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and - ansible_distribution == "RedHat" and - lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) | - default('no', True) | lower in ['no', 'false'] - -- name: Terminate instance(s) - hosts: localhost - become: no - connection: local - gather_facts: no - vars_files: - - vars.yml - tasks: - - name: Destroy VMs - virt: - name: '{{ item[0] }}' - command: '{{ item[1] }}' - uri: '{{ libvirt_uri }}' - with_nested: - - "{{ groups['oo_hosts_to_terminate'] }}" - - [ destroy, undefine ] - - - name: Delete VM drives - command: 'virsh -c {{ libvirt_uri }} vol-delete --pool {{ libvirt_storage_pool }} {{ item }}.qcow2' - args: - removes: '{{ libvirt_storage_pool_path }}/{{ item }}.qcow2' - with_items: "{{ groups['oo_hosts_to_terminate'] }}" - - - name: Delete VM docker drives - command: 'virsh -c {{ libvirt_uri }} vol-delete --pool {{ libvirt_storage_pool }} {{ item }}-docker.qcow2' - args: - removes: '{{ libvirt_storage_pool_path }}/{{ item }}-docker.qcow2' - with_items: "{{ groups['oo_hosts_to_terminate'] }}" - - - name: Delete the VM cloud-init image - file: - path: '{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso' - state: absent - with_items: "{{ groups['oo_hosts_to_terminate'] }}" - - - name: Remove the cloud-init config directory - file: - path: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/' - state: absent - with_items: "{{ groups['oo_hosts_to_terminate'] }}" diff --git a/playbooks/libvirt/openshift-cluster/update.yml b/playbooks/libvirt/openshift-cluster/update.yml deleted file mode 100644 index a152135fc..000000000 --- a/playbooks/libvirt/openshift-cluster/update.yml +++ /dev/null @@ -1,37 +0,0 @@ ---- -- hosts: localhost - gather_facts: no - tasks: - - include_vars: vars.yml - - include_vars: cluster_hosts.yml - - add_host: - name: "{{ item }}" - groups: l_oo_all_hosts - with_items: '{{ g_all_hosts }}' - -- hosts: l_oo_all_hosts - gather_facts: no - tasks: - - include_vars: vars.yml - - include_vars: cluster_hosts.yml - -- name: Populate oo_hosts_to_update group - hosts: localhost - connection: local - become: no - gather_facts: no - vars_files: - - vars.yml - - cluster_hosts.yml - tasks: - - name: Evaluate oo_hosts_to_update - add_host: - name: "{{ item }}" - groups: oo_hosts_to_update - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ 
deployment_vars[deployment_type].become }}" - with_items: '{{ g_all_hosts | default([]) }}' - -- include: ../../common/openshift-cluster/update_repos_and_packages.yml - -- include: config.yml diff --git a/playbooks/libvirt/openshift-cluster/vars.yml b/playbooks/libvirt/openshift-cluster/vars.yml deleted file mode 100644 index 5156789e7..000000000 --- a/playbooks/libvirt/openshift-cluster/vars.yml +++ /dev/null @@ -1,40 +0,0 @@ ---- -default_pool_path: "{{ lookup('env','HOME') }}/libvirt-storage-pool-openshift-ansible" -libvirt_storage_pool_path: "{{ lookup('oo_option', 'libvirt_storage_pool_path') | default(default_pool_path, True) }}" -libvirt_storage_pool: "{{ lookup('oo_option', 'libvirt_storage_pool') | default('openshift-ansible', True) }}" -libvirt_network: "{{ lookup('oo_option', 'libvirt_network') | default('openshift-ansible', True) }}" -libvirt_instance_memory_mib: "{{ lookup('oo_option', 'libvirt_instance_memory_mib') | default(1024, True) }}" -libvirt_instance_vcpu: "{{ lookup('oo_option', 'libvirt_instance_vcpu') | default(2, True) }}" -libvirt_uri: "{{ lookup('oo_option', 'libvirt_uri') | default('qemu:///system', True) }}" -debug_level: 2 - -# Automatic download of the qcow2 image for RHEL cannot be done directly from the RedHat portal because it requires authentication. -# The default value of image_url for enterprise and openshift-enterprise deployment types below won't work. -deployment_rhel7_ent_base: - image: - url: "{{ lookup('oo_option', 'image_url') | - default('https://access.cdn.redhat.com//content/origin/files/sha256/25/25f880767ec6bf71beb532e17f1c45231640bbfdfbbb1dffb79d2c1b328388e0/rhel-guest-image-7.2-20151102.0.x86_64.qcow2', True) }}" - name: "{{ lookup('oo_option', 'image_name') | - default('rhel-guest-image-7.2-20151102.0.x86_64.qcow2', True) }}" - sha256: "{{ lookup('oo_option', 'image_sha256') | - default('25f880767ec6bf71beb532e17f1c45231640bbfdfbbb1dffb79d2c1b328388e0', True) }}" - compression: "" - ssh_user: openshift - become: yes - -deployment_vars: - origin: - image: - url: "{{ lookup('oo_option', 'image_url') | - default('http://cloud.centos.org/centos/7/images/CentOS-7-x86_64-GenericCloud-1602.qcow2.xz', True) }}" - compression: "{{ lookup('oo_option', 'image_compression') | - default('xz', True) }}" - name: "{{ lookup('oo_option', 'image_name') | - default('CentOS-7-x86_64-GenericCloud.qcow2', True) }}" - sha256: "{{ lookup('oo_option', 'image_sha256') | - default('dd0f5e610e7c5ffacaca35ed7a78a19142a588f4543da77b61c1fb0d74400471', True) }}" - ssh_user: openshift - become: yes - enterprise: "{{ deployment_rhel7_ent_base }}" - openshift-enterprise: "{{ deployment_rhel7_ent_base }}" - atomic-enterprise: "{{ deployment_rhel7_ent_base }}" diff --git a/playbooks/openstack/README.md b/playbooks/openstack/README.md deleted file mode 100644 index a6d8d6995..000000000 --- a/playbooks/openstack/README.md +++ /dev/null @@ -1,4 +0,0 @@ -# OpenStack playbooks - -This playbook directory is meant to be driven by [`bin/cluster`](../../bin), -which is community supported and most use is considered deprecated. 
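The libvirt `vars.yml` removed above resolved every tunable through the custom `oo_option` lookup plugin with a hard-coded fallback. For anyone keeping a local copy of these playbooks after this removal, the same override-with-default behaviour can be reproduced with stock Ansible lookups; a minimal sketch, assuming plain environment-variable overrides (the `LIBVIRT_*` variable names are illustrative, not defined by this repository):

```yaml
---
# Sketch only: reproduces the default-with-override pattern of the removed
# playbooks/libvirt/openshift-cluster/vars.yml without the custom `oo_option`
# lookup plugin. The environment variable names below are assumptions.
default_pool_path: "{{ lookup('env', 'HOME') }}/libvirt-storage-pool-openshift-ansible"

# Fall back to the default when the environment variable is unset or empty.
libvirt_storage_pool_path: "{{ lookup('env', 'LIBVIRT_STORAGE_POOL_PATH') | default(default_pool_path, true) }}"
libvirt_storage_pool: "{{ lookup('env', 'LIBVIRT_STORAGE_POOL') | default('openshift-ansible', true) }}"
libvirt_network: "{{ lookup('env', 'LIBVIRT_NETWORK') | default('openshift-ansible', true) }}"
```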
diff --git a/playbooks/openstack/openshift-cluster/cluster_hosts.yml b/playbooks/openstack/openshift-cluster/cluster_hosts.yml deleted file mode 100644 index 98434439c..000000000 --- a/playbooks/openstack/openshift-cluster/cluster_hosts.yml +++ /dev/null @@ -1,21 +0,0 @@ ---- -g_all_hosts: "{{ groups['meta-clusterid_' ~ cluster_id] | default([]) - | intersect(groups['meta-environment_' ~ cluster_env] | default([])) }}" - -g_etcd_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_etcd'] | default([])) }}" - -g_lb_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_lb'] | default([])) }}" - -g_nfs_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_nfs'] | default([])) }}" - -g_master_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_master'] | default([])) }}" - -g_new_master_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_new_master'] | default([])) }}" - -g_node_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_node'] | default([])) }}" - -g_new_node_hosts: "{{ g_all_hosts | intersect(groups['meta-host-type_new_node'] | default([])) }}" - -g_infra_hosts: "{{ g_node_hosts | intersect(groups['meta-sub-host-type_infra'] | default([])) }}" - -g_compute_hosts: "{{ g_node_hosts | intersect(groups['meta-sub-host-type_compute'] | default([])) }}" diff --git a/playbooks/openstack/openshift-cluster/config.yml b/playbooks/openstack/openshift-cluster/config.yml deleted file mode 100644 index 1366c83ca..000000000 --- a/playbooks/openstack/openshift-cluster/config.yml +++ /dev/null @@ -1,32 +0,0 @@ ---- -- hosts: localhost - gather_facts: no - tasks: - - include_vars: vars.yml - - include_vars: cluster_hosts.yml - - add_host: - name: "{{ item }}" - groups: l_oo_all_hosts - with_items: "{{ g_all_hosts | default([]) }}" - -- hosts: l_oo_all_hosts - gather_facts: no - tasks: - - include_vars: vars.yml - - include_vars: cluster_hosts.yml - -- include: ../../common/openshift-cluster/config.yml - vars: - g_nodeonmaster: true - g_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - g_sudo: "{{ deployment_vars[deployment_type].become }}" - openshift_cluster_id: "{{ cluster_id }}" - openshift_debug_level: "{{ debug_level }}" - openshift_deployment_type: "{{ deployment_type }}" - openshift_hosted_registry_selector: 'type=infra' - openshift_hosted_router_selector: 'type=infra' - openshift_master_cluster_method: 'native' - openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}" - os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}" - openshift_use_flannel: "{{ lookup('oo_option', 'use_flannel') }}" - openshift_use_fluentd: "{{ lookup('oo_option', 'use_fluentd') }}" diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml deleted file mode 100644 index 20ce47c07..000000000 --- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml +++ /dev/null @@ -1,518 +0,0 @@ -heat_template_version: 2014-10-16 - -description: OpenShift cluster - -parameters: - - cluster_env: - type: string - label: Cluster environment - description: Environment of the cluster - - cluster_id: - type: string - label: Cluster ID - description: Identifier of the cluster - - subnet_24_prefix: - type: string - label: subnet /24 prefix - description: /24 subnet prefix of the network of the cluster (dot separated number triplet) - - dns_nameservers: - type: comma_delimited_list - label: DNS nameservers list - description: List of DNS nameservers - - external_net: - 
type: string - label: External network - description: Name of the external network - default: external - - ssh_public_key: - type: string - label: SSH public key - description: SSH public key - hidden: true - - ssh_incoming: - type: string - label: Source of ssh connections - description: Source of legitimate ssh connections - default: 0.0.0.0/0 - - node_port_incoming: - type: string - label: Source of node port connections - description: Authorized sources targeting node ports - default: 0.0.0.0/0 - - num_etcd: - type: number - label: Number of etcd nodes - description: Number of etcd nodes - - num_masters: - type: number - label: Number of masters - description: Number of masters - - num_nodes: - type: number - label: Number of compute nodes - description: Number of compute nodes - - num_infra: - type: number - label: Number of infrastructure nodes - description: Number of infrastructure nodes - - etcd_image: - type: string - label: Etcd image - description: Name of the image for the etcd servers - - master_image: - type: string - label: Master image - description: Name of the image for the master servers - - node_image: - type: string - label: Node image - description: Name of the image for the compute node servers - - infra_image: - type: string - label: Infra image - description: Name of the image for the infra node servers - - etcd_flavor: - type: string - label: Etcd flavor - description: Flavor of the etcd servers - - master_flavor: - type: string - label: Master flavor - description: Flavor of the master servers - - node_flavor: - type: string - label: Node flavor - description: Flavor of the compute node servers - - infra_flavor: - type: string - label: Infra flavor - description: Flavor of the infra node servers - -outputs: - - etcd_names: - description: Name of the etcds - value: { get_attr: [ etcd, name ] } - - etcd_ips: - description: IPs of the etcds - value: { get_attr: [ etcd, private_ip ] } - - etcd_floating_ips: - description: Floating IPs of the etcds - value: { get_attr: [ etcd, floating_ip ] } - - master_names: - description: Name of the masters - value: { get_attr: [ masters, name ] } - - master_ips: - description: IPs of the masters - value: { get_attr: [ masters, private_ip ] } - - master_floating_ips: - description: Floating IPs of the masters - value: { get_attr: [ masters, floating_ip ] } - - node_names: - description: Name of the nodes - value: { get_attr: [ compute_nodes, name ] } - - node_ips: - description: IPs of the nodes - value: { get_attr: [ compute_nodes, private_ip ] } - - node_floating_ips: - description: Floating IPs of the nodes - value: { get_attr: [ compute_nodes, floating_ip ] } - - infra_names: - description: Name of the nodes - value: { get_attr: [ infra_nodes, name ] } - - infra_ips: - description: IPs of the nodes - value: { get_attr: [ infra_nodes, private_ip ] } - - infra_floating_ips: - description: Floating IPs of the nodes - value: { get_attr: [ infra_nodes, floating_ip ] } - -resources: - - net: - type: OS::Neutron::Net - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: { get_param: cluster_id } - - subnet: - type: OS::Neutron::Subnet - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-subnet - params: - cluster_id: { get_param: cluster_id } - network: { get_resource: net } - cidr: - str_replace: - template: subnet_24_prefix.0/24 - params: - subnet_24_prefix: { get_param: subnet_24_prefix } - dns_nameservers: { get_param: dns_nameservers } - - router: - 
type: OS::Neutron::Router - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-router - params: - cluster_id: { get_param: cluster_id } - external_gateway_info: - network: { get_param: external_net } - - interface: - type: OS::Neutron::RouterInterface - properties: - router_id: { get_resource: router } - subnet_id: { get_resource: subnet } - - keypair: - type: OS::Nova::KeyPair - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-keypair - params: - cluster_id: { get_param: cluster_id } - public_key: { get_param: ssh_public_key } - - master-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-master-secgrp - params: - cluster_id: { get_param: cluster_id } - description: - str_replace: - template: Security group for cluster_id OpenShift cluster master - params: - cluster_id: { get_param: cluster_id } - rules: - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: { get_param: ssh_incoming } - - direction: ingress - protocol: tcp - port_range_min: 4001 - port_range_max: 4001 - - direction: ingress - protocol: tcp - port_range_min: 8443 - port_range_max: 8443 - - direction: ingress - protocol: tcp - port_range_min: 8444 - port_range_max: 8444 - - direction: ingress - protocol: tcp - port_range_min: 53 - port_range_max: 53 - - direction: ingress - protocol: udp - port_range_min: 53 - port_range_max: 53 - - direction: ingress - protocol: tcp - port_range_min: 8053 - port_range_max: 8053 - - direction: ingress - protocol: udp - port_range_min: 8053 - port_range_max: 8053 - - direction: ingress - protocol: tcp - port_range_min: 24224 - port_range_max: 24224 - - direction: ingress - protocol: udp - port_range_min: 24224 - port_range_max: 24224 - - direction: ingress - protocol: tcp - port_range_min: 2224 - port_range_max: 2224 - - direction: ingress - protocol: udp - port_range_min: 5404 - port_range_max: 5404 - - direction: ingress - protocol: udp - port_range_min: 5405 - port_range_max: 5405 - - direction: ingress - protocol: tcp - port_range_min: 9090 - port_range_max: 9090 - - etcd-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-etcd-secgrp - params: - cluster_id: { get_param: cluster_id } - description: - str_replace: - template: Security group for cluster_id etcd cluster - params: - cluster_id: { get_param: cluster_id } - rules: - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: { get_param: ssh_incoming } - - direction: ingress - protocol: tcp - port_range_min: 2379 - port_range_max: 2379 - remote_mode: remote_group_id - remote_group_id: { get_resource: master-secgrp } - - direction: ingress - protocol: tcp - port_range_min: 2380 - port_range_max: 2380 - remote_mode: remote_group_id - - node-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-node-secgrp - params: - cluster_id: { get_param: cluster_id } - description: - str_replace: - template: Security group for cluster_id OpenShift cluster nodes - params: - cluster_id: { get_param: cluster_id } - rules: - - direction: ingress - protocol: tcp - port_range_min: 22 - port_range_max: 22 - remote_ip_prefix: { get_param: ssh_incoming } - - direction: ingress - protocol: tcp - port_range_min: 10250 - port_range_max: 10250 - remote_mode: remote_group_id - - direction: ingress - protocol: tcp - 
port_range_min: 10255 - port_range_max: 10255 - remote_mode: remote_group_id - - direction: ingress - protocol: udp - port_range_min: 10255 - port_range_max: 10255 - remote_mode: remote_group_id - - direction: ingress - protocol: udp - port_range_min: 4789 - port_range_max: 4789 - remote_mode: remote_group_id - - direction: ingress - protocol: tcp - port_range_min: 30000 - port_range_max: 32767 - remote_ip_prefix: { get_param: node_port_incoming } - - infra-secgrp: - type: OS::Neutron::SecurityGroup - properties: - name: - str_replace: - template: openshift-ansible-cluster_id-infra-secgrp - params: - cluster_id: { get_param: cluster_id } - description: - str_replace: - template: Security group for cluster_id OpenShift infrastructure cluster nodes - params: - cluster_id: { get_param: cluster_id } - rules: - - direction: ingress - protocol: tcp - port_range_min: 80 - port_range_max: 80 - - direction: ingress - protocol: tcp - port_range_min: 443 - port_range_max: 443 - - etcd: - type: OS::Heat::ResourceGroup - properties: - count: { get_param: num_etcd } - resource_def: - type: heat_stack_server.yaml - properties: - name: - str_replace: - template: cluster_id-k8s_type-%index% - params: - cluster_id: { get_param: cluster_id } - k8s_type: etcd - cluster_env: { get_param: cluster_env } - cluster_id: { get_param: cluster_id } - type: etcd - image: { get_param: etcd_image } - flavor: { get_param: etcd_flavor } - key_name: { get_resource: keypair } - net: { get_resource: net } - subnet: { get_resource: subnet } - secgrp: - - { get_resource: etcd-secgrp } - floating_network: { get_param: external_net } - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: { get_param: cluster_id } - depends_on: - - interface - - masters: - type: OS::Heat::ResourceGroup - properties: - count: { get_param: num_masters } - resource_def: - type: heat_stack_server.yaml - properties: - name: - str_replace: - template: cluster_id-k8s_type-%index% - params: - cluster_id: { get_param: cluster_id } - k8s_type: master - cluster_env: { get_param: cluster_env } - cluster_id: { get_param: cluster_id } - type: master - image: { get_param: master_image } - flavor: { get_param: master_flavor } - key_name: { get_resource: keypair } - net: { get_resource: net } - subnet: { get_resource: subnet } - secgrp: - - { get_resource: master-secgrp } - - { get_resource: node-secgrp } - floating_network: { get_param: external_net } - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: { get_param: cluster_id } - depends_on: - - interface - - compute_nodes: - type: OS::Heat::ResourceGroup - properties: - count: { get_param: num_nodes } - resource_def: - type: heat_stack_server.yaml - properties: - name: - str_replace: - template: cluster_id-k8s_type-sub_host_type-%index% - params: - cluster_id: { get_param: cluster_id } - k8s_type: node - sub_host_type: compute - cluster_env: { get_param: cluster_env } - cluster_id: { get_param: cluster_id } - type: node - subtype: compute - image: { get_param: node_image } - flavor: { get_param: node_flavor } - key_name: { get_resource: keypair } - net: { get_resource: net } - subnet: { get_resource: subnet } - secgrp: - - { get_resource: node-secgrp } - floating_network: { get_param: external_net } - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: { get_param: cluster_id } - depends_on: - - interface - - infra_nodes: - type: OS::Heat::ResourceGroup - properties: - count: { 
get_param: num_infra } - resource_def: - type: heat_stack_server.yaml - properties: - name: - str_replace: - template: cluster_id-k8s_type-sub_host_type-%index% - params: - cluster_id: { get_param: cluster_id } - k8s_type: node - sub_host_type: infra - cluster_env: { get_param: cluster_env } - cluster_id: { get_param: cluster_id } - type: node - subtype: infra - image: { get_param: infra_image } - flavor: { get_param: infra_flavor } - key_name: { get_resource: keypair } - net: { get_resource: net } - subnet: { get_resource: subnet } - secgrp: - - { get_resource: node-secgrp } - - { get_resource: infra-secgrp } - floating_network: { get_param: external_net } - net_name: - str_replace: - template: openshift-ansible-cluster_id-net - params: - cluster_id: { get_param: cluster_id } - depends_on: - - interface diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml deleted file mode 100644 index 435139849..000000000 --- a/playbooks/openstack/openshift-cluster/files/heat_stack_server.yaml +++ /dev/null @@ -1,152 +0,0 @@ -heat_template_version: 2014-10-16 - -description: OpenShift cluster server - -parameters: - - name: - type: string - label: Name - description: Name - - cluster_env: - type: string - label: Cluster environment - description: Environment of the cluster - - cluster_id: - type: string - label: Cluster ID - description: Identifier of the cluster - - type: - type: string - label: Type - description: Type master or node - - subtype: - type: string - label: Sub-type - description: Sub-type compute or infra for nodes, default otherwise - default: default - - key_name: - type: string - label: Key name - description: Key name of keypair - - image: - type: string - label: Image - description: Name of the image - - flavor: - type: string - label: Flavor - description: Name of the flavor - - net: - type: string - label: Net ID - description: Net resource - - net_name: - type: string - label: Net name - description: Net name - - subnet: - type: string - label: Subnet ID - description: Subnet resource - - secgrp: - type: comma_delimited_list - label: Security groups - description: Security group resources - - floating_network: - type: string - label: Floating network - description: Network to allocate floating IP from - -outputs: - - name: - description: Name of the server - value: { get_attr: [ server, name ] } - - private_ip: - description: Private IP of the server - value: - get_attr: - - server - - addresses - - { get_param: net_name } - - 0 - - addr - - floating_ip: - description: Floating IP of the server - value: - get_attr: - - server - - addresses - - { get_param: net_name } - - 1 - - addr - -resources: - - server: - type: OS::Nova::Server - properties: - name: { get_param: name } - key_name: { get_param: key_name } - image: { get_param: image } - flavor: { get_param: flavor } - networks: - - port: { get_resource: port } - user_data: { get_resource: config } - user_data_format: RAW - metadata: - environment: { get_param: cluster_env } - clusterid: { get_param: cluster_id } - host-type: { get_param: type } - sub-host-type: { get_param: subtype } - - port: - type: OS::Neutron::Port - properties: - network: { get_param: net } - fixed_ips: - - subnet: { get_param: subnet } - security_groups: { get_param: secgrp } - - floating-ip: - type: OS::Neutron::FloatingIP - properties: - floating_network: { get_param: floating_network } - port_id: { get_resource: port } - - config: - type: 
OS::Heat::CloudConfig - properties: - cloud_config: - disable_root: true - - hostname: { get_param: name } - - system_info: - default_user: - name: openshift - sudo: ["ALL=(ALL) NOPASSWD: ALL"] - - write_files: - - path: /etc/sudoers.d/00-openshift-no-requiretty - permissions: 440 - # content: Defaults:openshift !requiretty - # Encoded in base64 to be sure that we do not forget the trailing newline or - # sudo will not be able to parse that file - encoding: b64 - content: RGVmYXVsdHM6b3BlbnNoaWZ0ICFyZXF1aXJldHR5Cg== diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml deleted file mode 100644 index c0bc12f55..000000000 --- a/playbooks/openstack/openshift-cluster/launch.yml +++ /dev/null @@ -1,191 +0,0 @@ ---- -- name: Launch instance(s) - hosts: localhost - become: no - connection: local - gather_facts: no - vars_files: - - vars.yml - tasks: - # TODO: Write an Ansible module for dealing with HEAT stacks - # Dealing with the outputs is currently terrible - - - name: Check OpenStack stack - command: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack' - register: stack_show_result - changed_when: false - failed_when: stack_show_result.rc != 0 and 'Stack not found' not in stack_show_result.stderr - - - set_fact: - heat_stack_action: 'stack-create' - when: stack_show_result.rc == 1 - - set_fact: - heat_stack_action: 'stack-update' - when: stack_show_result.rc == 0 - - - name: Create or Update OpenStack Stack - command: 'heat {{ heat_stack_action }} -f {{ openstack_infra_heat_stack }} - --timeout {{ openstack_heat_timeout }} - -P cluster_env={{ cluster_env }} - -P cluster_id={{ cluster_id }} - -P subnet_24_prefix={{ openstack_subnet_24_prefix }} - -P dns_nameservers={{ openstack_network_dns | join(",") }} - -P external_net={{ openstack_network_external_net }} - -P ssh_public_key="{{ openstack_ssh_public_key }}" - -P ssh_incoming={{ openstack_ssh_access_from }} - -P node_port_incoming={{ openstack_node_port_access_from }} - -P num_etcd={{ num_etcd }} - -P num_masters={{ num_masters }} - -P num_nodes={{ num_nodes }} - -P num_infra={{ num_infra }} - -P etcd_image={{ deployment_vars[deployment_type].image }} - -P master_image={{ deployment_vars[deployment_type].image }} - -P node_image={{ deployment_vars[deployment_type].image }} - -P infra_image={{ deployment_vars[deployment_type].image }} - -P etcd_flavor={{ openstack_flavor["etcd"] }} - -P master_flavor={{ openstack_flavor["master"] }} - -P node_flavor={{ openstack_flavor["node"] }} - -P infra_flavor={{ openstack_flavor["infra"] }} - openshift-ansible-{{ cluster_id }}-stack' - args: - chdir: '{{ playbook_dir }}' - - - name: Wait for OpenStack Stack readiness - shell: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack | awk ''$2 == "stack_status" {print $4}''' - register: stack_show_status_result - until: stack_show_status_result.stdout not in ['CREATE_IN_PROGRESS', 'UPDATE_IN_PROGRESS'] - retries: 30 - delay: 5 - - - name: Display the stack resources - command: 'heat resource-list openshift-ansible-{{ cluster_id }}-stack' - register: stack_resource_list_result - when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE'] - - - name: Display the stack status - command: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack' - register: stack_show_result - when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE'] - - - name: Delete the stack - command: 'heat stack-delete openshift-ansible-{{ cluster_id }}-stack' - when: 
stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE'] - - - fail: - msg: | - - +--------------------------------------+ - | ^ | - | /!\ Failed to create the heat stack | - | /___\ | - +--------------------------------------+ - - Here is the list of stack resources and their status: - {{ stack_resource_list_result.stdout }} - - Here is the status of the stack: - {{ stack_show_result.stdout }} - - ^ Failed to create the heat stack - /!\ - /___\ Please check the `stack_status_reason` line in the above array to know why. - when: stack_show_status_result.stdout not in ['CREATE_COMPLETE', 'UPDATE_COMPLETE'] - - - name: Read OpenStack Stack outputs - command: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack' - register: stack_show_result - - - set_fact: - parsed_outputs: "{{ stack_show_result | oo_parse_heat_stack_outputs }}" - - - name: Add new etcd instances groups and variables - add_host: - hostname: '{{ item[0] }}' - ansible_ssh_host: '{{ item[2] }}' - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - groups: 'meta-environment_{{ cluster_env }}, meta-host-type_etcd, meta-sub-host-type_default, meta-clusterid_{{ cluster_id }}' - openshift_node_labels: - type: "etcd" - openstack: - public_v4: '{{ item[2] }}' - private_v4: '{{ item[1] }}' - with_together: - - '{{ parsed_outputs.etcd_names }}' - - '{{ parsed_outputs.etcd_ips }}' - - '{{ parsed_outputs.etcd_floating_ips }}' - - - name: Add new master instances groups and variables - add_host: - hostname: '{{ item[0] }}' - ansible_ssh_host: '{{ item[2] }}' - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - groups: 'meta-environment_{{ cluster_env }}, meta-host-type_master, meta-sub-host-type_default, meta-clusterid_{{ cluster_id }}' - openshift_node_labels: - type: "master" - openstack: - public_v4: '{{ item[2] }}' - private_v4: '{{ item[1] }}' - with_together: - - '{{ parsed_outputs.master_names }}' - - '{{ parsed_outputs.master_ips }}' - - '{{ parsed_outputs.master_floating_ips }}' - - - name: Add new node instances groups and variables - add_host: - hostname: '{{ item[0] }}' - ansible_ssh_host: '{{ item[2] }}' - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - groups: 'meta-environment_{{ cluster_env }}, meta-host-type_node, meta-sub-host-type_compute, meta-clusterid_{{ cluster_id }}' - openshift_node_labels: - type: "compute" - openstack: - public_v4: '{{ item[2] }}' - private_v4: '{{ item[1] }}' - with_together: - - '{{ parsed_outputs.node_names }}' - - '{{ parsed_outputs.node_ips }}' - - '{{ parsed_outputs.node_floating_ips }}' - - - name: Add new infra instances groups and variables - add_host: - hostname: '{{ item[0] }}' - ansible_ssh_host: '{{ item[2] }}' - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - groups: 'meta-environment_{{ cluster_env }}, meta-host-type_node, meta-sub-host-type_infra, meta-clusterid_{{ cluster_id }}' - openshift_node_labels: - type: "infra" - openstack: - public_v4: '{{ item[2] }}' - private_v4: '{{ item[1] }}' - with_together: - - '{{ parsed_outputs.infra_names }}' - - '{{ parsed_outputs.infra_ips }}' - - '{{ parsed_outputs.infra_floating_ips }}' - - - name: Wait for ssh - wait_for: - host: '{{ item }}' - port: 22 - with_flattened: - - 
'{{ parsed_outputs.master_floating_ips }}' - - '{{ parsed_outputs.node_floating_ips }}' - - '{{ parsed_outputs.infra_floating_ips }}' - - - name: Wait for user setup - command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ deployment_vars[deployment_type].ssh_user }}@{{ item }} echo {{ deployment_vars[deployment_type].ssh_user }} user is setup' - register: result - until: result.rc == 0 - retries: 30 - delay: 1 - with_flattened: - - '{{ parsed_outputs.master_floating_ips }}' - - '{{ parsed_outputs.node_floating_ips }}' - - '{{ parsed_outputs.infra_floating_ips }}' - -- include: update.yml - -- include: list.yml diff --git a/playbooks/openstack/openshift-cluster/list.yml b/playbooks/openstack/openshift-cluster/list.yml deleted file mode 100644 index 6c6f671be..000000000 --- a/playbooks/openstack/openshift-cluster/list.yml +++ /dev/null @@ -1,24 +0,0 @@ ---- -- name: Generate oo_list_hosts group - hosts: localhost - become: no - connection: local - gather_facts: no - vars_files: - - vars.yml - tasks: - - set_fact: scratch_group=meta-clusterid_{{ cluster_id }} - when: cluster_id != '' - - set_fact: scratch_group=all - when: cluster_id == '' - - add_host: - name: "{{ item }}" - groups: oo_list_hosts - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_ssh_host: "{{ hostvars[item].ansible_ssh_host | default(item) }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - oo_public_ipv4: "{{ hostvars[item].openstack.public_v4 }}" - oo_private_ipv4: "{{ hostvars[item].openstack.private_v4 }}" - with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}" - - debug: - msg: "{{ hostvars | oo_select_keys(groups[scratch_group] | default([])) | oo_pretty_print_cluster('meta-') }}" diff --git a/playbooks/openstack/openshift-cluster/lookup_plugins b/playbooks/openstack/openshift-cluster/lookup_plugins deleted file mode 120000 index ac79701db..000000000 --- a/playbooks/openstack/openshift-cluster/lookup_plugins +++ /dev/null @@ -1 +0,0 @@ -../../../lookup_plugins
\ No newline at end of file diff --git a/playbooks/openstack/openshift-cluster/roles b/playbooks/openstack/openshift-cluster/roles deleted file mode 120000 index 20c4c58cf..000000000 --- a/playbooks/openstack/openshift-cluster/roles +++ /dev/null @@ -1 +0,0 @@ -../../../roles
\ No newline at end of file diff --git a/playbooks/openstack/openshift-cluster/terminate.yml b/playbooks/openstack/openshift-cluster/terminate.yml deleted file mode 100644 index affb57117..000000000 --- a/playbooks/openstack/openshift-cluster/terminate.yml +++ /dev/null @@ -1,49 +0,0 @@ ---- -- name: Terminate instance(s) - hosts: localhost - become: no - connection: local - gather_facts: no - vars_files: - - vars.yml - tasks: - - add_host: - name: "{{ item }}" - groups: oo_hosts_to_terminate - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: "{{ (groups['meta-environment_' ~ cluster_env]|default([])) | intersect(groups['meta-clusterid_' ~ cluster_id ]|default([])) }}" - -- name: Unsubscribe VMs - hosts: oo_hosts_to_terminate - vars_files: - - vars.yml - roles: - - role: rhel_unsubscribe - when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and - ansible_distribution == "RedHat" and - lookup('oo_option', 'rhel_skip_subscription') | default(rhsub_skip, True) | - default('no', True) | lower in ['no', 'false'] - -- hosts: localhost - become: no - connection: local - gather_facts: no - vars_files: - - vars.yml - tasks: - - name: Delete the OpenStack Stack - command: 'heat stack-delete openshift-ansible-{{ cluster_id }}-stack' - register: stack_delete_result - changed_when: stack_delete_result.rc == 0 - failed_when: stack_delete_result.rc != 0 and 'could not be found' not in stack_delete_result.stdout - - - name: Wait for the completion of the OpenStack Stack deletion - shell: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack | awk ''$2 == "stack_status" {print $4}''' - when: stack_delete_result.changed - register: stack_show_result - until: stack_show_result.stdout != 'DELETE_IN_PROGRESS' - retries: 60 - delay: 5 - failed_when: '"Stack not found" not in stack_show_result.stderr and - stack_show_result.stdout != "DELETE_COMPLETE"' diff --git a/playbooks/openstack/openshift-cluster/update.yml b/playbooks/openstack/openshift-cluster/update.yml deleted file mode 100644 index 6d2af3d26..000000000 --- a/playbooks/openstack/openshift-cluster/update.yml +++ /dev/null @@ -1,34 +0,0 @@ ---- -- hosts: localhost - gather_facts: no - tasks: - - include_vars: vars.yml - - include_vars: cluster_hosts.yml - - add_host: - name: "{{ item }}" - groups: l_oo_all_hosts - with_items: "{{ g_all_hosts }}" - -- hosts: l_oo_all_hosts - gather_facts: no - tasks: - - include_vars: vars.yml - - include_vars: cluster_hosts.yml - -- name: Populate oo_hosts_to_update group - hosts: localhost - connection: local - become: no - gather_facts: no - tasks: - - name: Evaluate oo_hosts_to_update - add_host: - name: "{{ item }}" - groups: oo_hosts_to_update - ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}" - ansible_become: "{{ deployment_vars[deployment_type].become }}" - with_items: "{{ g_all_hosts | default([]) }}" - -- include: ../../common/openshift-cluster/update_repos_and_packages.yml - -- include: config.yml diff --git a/playbooks/openstack/openshift-cluster/vars.yml b/playbooks/openstack/openshift-cluster/vars.yml deleted file mode 100644 index ba2855b73..000000000 --- a/playbooks/openstack/openshift-cluster/vars.yml +++ /dev/null @@ -1,38 +0,0 @@ -# yamllint disable rule:colons ---- -debug_level: 2 -openstack_infra_heat_stack: "{{ lookup('oo_option', 'infra_heat_stack' ) | - default('files/heat_stack.yaml', True) }}" -openstack_subnet_24_prefix: "{{ 
lookup('oo_option', 'subnet_24_prefix' ) | - default('192.168.' + ( ( 1048576 | random % 256 ) | string() ), True) }}" -openstack_network_external_net: "{{ lookup('oo_option', 'external_net' ) | - default('external', True) }}" -openstack_network_dns: "{{ lookup('oo_option', 'dns' ) | - default('8.8.8.8,8.8.4.4', True) | oo_split() }}" -openstack_ssh_public_key: "{{ lookup('file', lookup('oo_option', 'public_key') | - default('~/.ssh/id_rsa.pub', True)) }}" -openstack_ssh_access_from: "{{ lookup('oo_option', 'ssh_from') | - default('0.0.0.0/0', True) }}" -openstack_node_port_access_from: "{{ lookup('oo_option', 'node_port_from') | - default('0.0.0.0/0', True) }}" -openstack_heat_timeout: "{{ lookup('oo_option', 'heat_timeout') | - default('3', True) }}" -openstack_flavor: - etcd: "{{ lookup('oo_option', 'etcd_flavor' ) | default('m1.small', True) }}" - master: "{{ lookup('oo_option', 'master_flavor' ) | default('m1.small', True) }}" - infra: "{{ lookup('oo_option', 'infra_flavor' ) | default('m1.small', True) }}" - node: "{{ lookup('oo_option', 'node_flavor' ) | default('m1.medium', True) }}" - -deployment_rhel7_ent_base: - image: "{{ lookup('oo_option', 'image_name') | default('rhel-guest-image-7.2-20151102.0.x86_64', True) }}" - ssh_user: openshift - become: yes - -deployment_vars: - origin: - image: "{{ lookup('oo_option', 'image_name') | default('centos-70-raw', True) }}" - ssh_user: openshift - become: yes - enterprise: "{{ deployment_rhel7_ent_base }}" - openshift-enterprise: "{{ deployment_rhel7_ent_base }}" - atomic-enterprise: "{{ deployment_rhel7_ent_base }}"
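The removed OpenStack `launch.yml` shelled out to the legacy `heat` client and polled `stack-show` until the stack left its `*_IN_PROGRESS` state (retries: 30, delay: 5). If that flow is being reproduced outside this repository, the same wait can be expressed against the unified client; a minimal sketch, assuming the standard `openstack stack show` command and its `stack_status` column rather than anything defined here:

```yaml
# Sketch only: equivalent of the stack-status polling in the removed
# playbooks/openstack/openshift-cluster/launch.yml, using the unified
# OpenStack CLI in place of the retired `heat` client.
- name: Wait for the Heat stack to leave the IN_PROGRESS state
  command: "openstack stack show -f value -c stack_status openshift-ansible-{{ cluster_id }}-stack"
  register: stack_status
  changed_when: false
  until: stack_status.stdout not in ['CREATE_IN_PROGRESS', 'UPDATE_IN_PROGRESS']
  retries: 30
  delay: 5
```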