Diffstat (limited to 'playbooks')
29 files changed, 438 insertions, 149 deletions
diff --git a/playbooks/adhoc/bootstrap-fedora.yml b/playbooks/adhoc/bootstrap-fedora.yml
index 5ca383a37..b370d7fba 100644
--- a/playbooks/adhoc/bootstrap-fedora.yml
+++ b/playbooks/adhoc/bootstrap-fedora.yml
@@ -1,4 +1,5 @@
 - hosts: OSEv3
+  gather_facts: false
   tasks:
   - name: install python and deps for ansible modules
     raw: dnf install -y python2 python2-dnf libselinux-python libsemanage-python python2-firewall pyOpenSSL python-cryptography
diff --git a/playbooks/adhoc/metrics_setup/playbooks/install.yml b/playbooks/adhoc/metrics_setup/playbooks/install.yml
index 235f775ef..a9ec3c1ef 100644
--- a/playbooks/adhoc/metrics_setup/playbooks/install.yml
+++ b/playbooks/adhoc/metrics_setup/playbooks/install.yml
@@ -16,21 +16,30 @@
   - name: "Add metrics-deployer"
     command: "{{item}}"
+    run_once: true
+    register: output
+    failed_when: ('already exists' not in output.stderr) and (output.rc != 0)
     with_items:
       - oc project openshift-infra
       - oc create -f /tmp/metrics-deployer-setup.yaml

   - name: "Give metrics-deployer SA permissions"
     command: "oadm policy add-role-to-user edit system:serviceaccount:openshift-infra:metrics-deployer"
+    run_once: true

   - name: "Give heapster SA permissions"
     command: "oadm policy add-cluster-role-to-user cluster-reader system:serviceaccount:openshift-infra:heapster"
+    run_once: true

   - name: "Create metrics-deployer secret"
     command: "oc secrets new metrics-deployer nothing=/dev/null"
+    register: output
+    failed_when: ('already exists' not in output.stderr) and (output.rc != 0)
+    run_once: true

   - name: "Copy metrics.yaml to remote"
     copy: "src=../files/metrics.yaml dest=/tmp/metrics.yaml force=yes"

   - name: "Process yml template"
     shell: "oc process -f /tmp/metrics.yaml -v MASTER_URL={{ masterPublicURL }},REDEPLOY=true,HAWKULAR_METRICS_HOSTNAME={{ metrics_external_service }},IMAGE_PREFIX={{ metrics_image_prefix }},IMAGE_VERSION={{ metrics_image_version }},USE_PERSISTENT_STORAGE=false | oc create -f -"
+    run_once: true
\ No newline at end of file
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index 4edd44fe4..cd569937c 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -72,6 +72,10 @@
     - tuned-profiles-openshift-node
     - tuned-profiles-origin-node

+  - name: Remove flannel package
+    action: "{{ ansible_pkg_mgr }} name=flannel state=absent"
+    when: openshift_use_flannel | default(false) | bool and not is_atomic | bool
+
   - shell: systemctl reset-failed
     changed_when: False

@@ -92,12 +96,6 @@
     - vlinuxbr
     - vovsbr

-  - name: restart docker
-    service: name=docker state=restarted
-
-  - name: restart NetworkManager
-    service: name=NetworkManager state=restarted
-
   - shell: find /var/lib/origin/openshift.local.volumes -type d -exec umount {} \; 2>/dev/null || true
     changed_when: False

@@ -161,31 +159,39 @@
     with_items:
     - /etc/ansible/facts.d/openshift.fact
     - /etc/atomic-enterprise
+    - /etc/dnsmasq.d/origin-dns.conf
+    - /etc/dnsmasq.d/origin-upstream-dns.conf
+    - /etc/NetworkManager/dispatcher.d/99-origin-dns.sh
     - /etc/openshift
     - /etc/openshift-sdn
     - /etc/origin
-    - /etc/systemd/system/atomic-openshift-node.service
-    - /etc/systemd/system/atomic-openshift-node-dep.service
-    - /etc/systemd/system/origin-node.service
-    - /etc/systemd/system/origin-node-dep.service
-    - /etc/systemd/system/openvswitch.service
     - /etc/sysconfig/atomic-enterprise-node
     - /etc/sysconfig/atomic-openshift-node
     - /etc/sysconfig/atomic-openshift-node-dep
-    - /etc/sysconfig/origin-node
-    - /etc/sysconfig/origin-node-dep
     - /etc/sysconfig/openshift-node
     - /etc/sysconfig/openshift-node-dep
     - /etc/sysconfig/openvswitch
     - /etc/sysconfig/origin-node
+    - /etc/sysconfig/origin-node
+    - /etc/sysconfig/origin-node-dep
+    - /etc/systemd/system/atomic-openshift-node-dep.service
+    - /etc/systemd/system/atomic-openshift-node.service
     - /etc/systemd/system/atomic-openshift-node.service.wants
+    - /etc/systemd/system/docker.service.d/docker-sdn-ovs.conf
+    - /etc/systemd/system/openvswitch.service
+    - /etc/systemd/system/origin-node-dep.service
+    - /etc/systemd/system/origin-node.service
     - /run/openshift-sdn
     - /var/lib/atomic-enterprise
     - /var/lib/openshift
     - /var/lib/origin
-    - /etc/NetworkManager/dispatcher.d/99-origin-dns.sh
-    - /etc/dnsmasq.d/origin-dns.conf
-    - /etc/dnsmasq.d/origin-upstream-dns.conf
+
+  - name: restart docker
+    service: name=docker state=restarted
+
+  - name: restart NetworkManager
+    service: name=NetworkManager state=restarted
+
 - hosts: masters
   become: yes
@@ -289,6 +295,7 @@
     - /usr/local/bin/oadm
     - /usr/local/bin/oc
     - /usr/local/bin/kubectl
+    - /etc/flannel

 # Since we are potentially removing the systemd unit files for separated
 # master-api and master-controllers services, so we need to reload the
@@ -338,7 +345,14 @@
     - /etc/ansible/facts.d/openshift.fact
     - /etc/etcd
     - /etc/systemd/system/etcd_container.service
-    - /var/lib/etcd
+
+  # Intentionally using rm command over file module because if someone had mounted a filesystem
+  # at /var/lib/etcd then the contents was not removed correctly
+  - name: Remove etcd data
+    shell: rm -rf /var/lib/etcd/*
+    args:
+      warn: no
+    failed_when: false

 - hosts: lb
   become: yes
diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml
index 647c72239..05cfe7d6e 100644
--- a/playbooks/aws/openshift-cluster/config.yml
+++ b/playbooks/aws/openshift-cluster/config.yml
@@ -30,7 +30,7 @@
     openshift_hosted_router_selector: 'type=infra'
     openshift_node_labels:
       region: "{{ deployment_vars[deployment_type].region }}"
-      type: "{{ hostvars[inventory_hostname]['ec2_tag_sub-host-type'] if inventory_hostname in groups['tag_host-type_node'] else hostvars[inventory_hostname]['ec2_tag_host-type'] }}"
+      type: "{{ hostvars[inventory_hostname]['ec2_tag_sub-host-type'] }}"
     openshift_master_cluster_method: 'native'
     openshift_use_openshift_sdn: "{{ lookup('oo_option', 'use_openshift_sdn') }}"
     os_sdn_network_plugin_name: "{{ lookup('oo_option', 'sdn_network_plugin_name') }}"
diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml
index 0a931fbe0..0b85b2485 100644
--- a/playbooks/byo/openshift-cluster/config.yml
+++ b/playbooks/byo/openshift-cluster/config.yml
@@ -5,6 +5,8 @@
   connection: local
   become: no
   gather_facts: no
+  tags:
+  - always
   tasks:
   - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
   - add_host:
@@ -14,6 +16,8 @@

 - hosts: l_oo_all_hosts
   gather_facts: no
+  tags:
+  - always
   tasks:
   - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml

@@ -22,3 +26,4 @@
     openshift_cluster_id: "{{ cluster_id | default('default') }}"
     openshift_debug_level: "{{ debug_level | default(2) }}"
     openshift_deployment_type: "{{ deployment_type }}"
+    openshift_deployment_subtype: "{{ deployment_subtype | default(none) }}"
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/nuke_images.sh b/playbooks/byo/openshift-cluster/upgrades/docker/nuke_images.sh
new file mode 120000
index 000000000..d5d864b63
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/nuke_images.sh
@@ -0,0 +1 @@
+../../../../common/openshift-cluster/upgrades/files/nuke_images.sh
\ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/additional_config.yml b/playbooks/common/openshift-cluster/additional_config.yml
index e9fb3de96..26b31d313 100644
--- a/playbooks/common/openshift-cluster/additional_config.yml
+++ b/playbooks/common/openshift-cluster/additional_config.yml
@@ -1,11 +1,3 @@
-- name: Configure flannel
-  hosts: oo_first_master
-  vars:
-    etcd_urls: "{{ openshift.master.etcd_urls }}"
-  roles:
-  - role: flannel_register
-    when: openshift.common.use_flannel | bool
-
 - name: Additional master configuration
   hosts: oo_first_master
   vars:
@@ -23,8 +15,6 @@
       when: openshift.common.use_manageiq | bool
   - role: cockpit
     when: not openshift.common.is_atomic and ( deployment_type in ['atomic-enterprise','openshift-enterprise'] ) and
-      (osm_use_cockpit | bool or osm_use_cockpit is undefined )
+      (osm_use_cockpit | bool or osm_use_cockpit is undefined ) and ( openshift.common.deployment_subtype != 'registry' )
   - role: flannel_register
     when: openshift.common.use_flannel | bool
-
-
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 5cf5df08e..d6a99fcda 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -1,14 +1,22 @@
 ---
 - include: evaluate_groups.yml
+  tags:
+  - always

 - include: initialize_facts.yml
+  tags:
+  - always

 - include: validate_hostnames.yml
+  tags:
+  - node

 - include: initialize_openshift_version.yml

 - name: Set oo_options
   hosts: oo_all_hosts
+  tags:
+  - always
   tasks:
   - set_fact:
       openshift_docker_additional_registries: "{{ lookup('oo_option', 'docker_additional_registries') }}"
@@ -30,15 +38,29 @@
     when: openshift_docker_log_options is not defined

 - include: ../openshift-etcd/config.yml
+  tags:
+  - etcd

 - include: ../openshift-nfs/config.yml
+  tags:
+  - nfs

 - include: ../openshift-loadbalancer/config.yml
+  tags:
+  - loadbalancer

 - include: ../openshift-master/config.yml
+  tags:
+  - master

 - include: additional_config.yml
+  tags:
+  - master

 - include: ../openshift-node/config.yml
+  tags:
+  - node

 - include: openshift_hosted.yml
+  tags:
+  - hosted
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index 3fb42a7fa..b3e02fb97 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -77,7 +77,7 @@
       ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
       ansible_become: "{{ g_sudo | default(omit) }}"
     with_items: "{{ g_master_hosts | default([]) }}"
-    when: g_nodeonmaster | default(false) == true and g_new_node_hosts is not defined
+    when: g_nodeonmaster | default(false) | bool and not g_new_node_hosts | default(false) | bool

 - name: Evaluate oo_first_etcd
   add_host:
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index 4d4a09828..4aca4daf4 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -1,5 +1,8 @@
+---
 - name: Create persistent volumes
   hosts: oo_first_master
+  tags:
+  - hosted
   vars:
     persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}"
     persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}"
@@ -9,6 +12,8 @@

 - name: Create Hosted Resources
   hosts: oo_first_master
+  tags:
+  - hosted
   pre_tasks:
   - set_fact:
       openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
@@ -36,6 +41,90 @@
         openshift_serviceaccounts_sccs:
         - privileged
       when: not openshift.common.version_gte_3_2_or_1_2
+  - role: openshift_hosted
   - role: openshift_metrics
     when: openshift.hosted.metrics.deploy | bool
-  - role: openshift_hosted
+  - role: cockpit-ui
+    when: openshift.common.deployment_subtype == 'registry'
+
+- name: Configure CA certificate for secure registry
+  hosts: oo_nodes_to_config
+  tags:
+  - hosted
+  tasks:
+  - name: Create temp directory for kubeconfig
+    command: mktemp -d /tmp/openshift-ansible-XXXXXX
+    register: mktemp
+    when: openshift.common.deployment_subtype == 'registry'
+    changed_when: false
+    delegate_to: "{{ groups.oo_first_master.0 }}"
+    run_once: true
+  - set_fact:
+      openshift_hosted_kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
+    when: openshift.common.deployment_subtype == 'registry'
+    delegate_to: "{{ groups.oo_first_master.0 }}"
+    run_once: true
+  - name: Copy the admin client config(s)
+    command: >
+      cp {{ openshift.common.config_base }}/master/admin.kubeconfig {{ openshift_hosted_kubeconfig }}
+    when: openshift.common.deployment_subtype == 'registry'
+    changed_when: false
+    delegate_to: "{{ groups.oo_first_master.0 }}"
+    run_once: true
+  - name: Retrieve docker-registry route
+    command: >
+      {{ openshift.common.client_binary }} get route docker-registry
+      --template='{{ '{{' }} .spec.host {{ '}}' }}'
+      --config={{ openshift_hosted_kubeconfig }}
+      -n default
+    register: docker_registry_route
+    when: openshift.common.deployment_subtype == 'registry'
+    changed_when: false
+    delegate_to: "{{ groups.oo_first_master.0 }}"
+    run_once: true
+  - name: Retrieve registry service IP
+    command: >
+      {{ openshift.common.client_binary }} get service docker-registry
+      --template='{{ '{{' }} .spec.clusterIP {{ '}}' }}'
+      --config={{ openshift_hosted_kubeconfig }}
+      -n default
+    register: docker_registry_service_ip
+    when: openshift.common.deployment_subtype == 'registry'
+    changed_when: false
+    delegate_to: "{{ groups.oo_first_master.0 }}"
+    run_once: true
+  - name: Create registry CA directories
+    file:
+      path: "/etc/docker/certs.d/{{ item }}"
+      state: directory
+    with_items:
+    - "{{ docker_registry_service_ip.stdout }}:5000"
+    - "{{ docker_registry_route.stdout }}"
+    - "docker-registry.default.svc.cluster.local:5000"
+    when: openshift.common.deployment_subtype == 'registry'
+  - name: Copy CA to registry CA directories
+    copy:
+      src: "{{ openshift.common.config_base }}/node/ca.crt"
+      dest: "/etc/docker/certs.d/{{ item }}"
+      remote_src: yes
+      force: yes
+    with_items:
+    - "{{ docker_registry_service_ip.stdout }}:5000"
+    - "{{ docker_registry_route.stdout }}"
+    - "docker-registry.default.svc.cluster.local:5000"
+    when: openshift.common.deployment_subtype == 'registry'
+    notify:
+    - Restart docker
+  - name: Delete temp directory
+    file:
+      name: "{{ mktemp.stdout }}"
+      state: absent
+    when: openshift.common.deployment_subtype == 'registry'
+    changed_when: False
+    delegate_to: "{{ groups.oo_first_master.0 }}"
+    run_once: true
+  handlers:
+  - name: Restart docker
+    service:
+      name: docker
+      state: restarted
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates.yml b/playbooks/common/openshift-cluster/redeploy-certificates.yml
index b97906072..5b72c3450 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates.yml
@@ -52,6 +52,14 @@
     openshift_ca_host: "{{ groups.oo_first_master.0 }}"
     openshift_master_count: "{{ openshift.master.master_count | default(groups.oo_masters | length) }}"
   pre_tasks:
+  # set_fact task copied from playbooks/common/openshift-master/config.yml
+  # so that openshift_master_default_subdomain has a default value of ""
+  # (empty string). openshift_master_default_subdomain must have a default
+  # value for openshift_master_facts to set metrics_public_url.
+  # TODO: clean this up.
+  - set_fact:
+      openshift_master_default_subdomain: "{{ lookup('oo_option', 'openshift_master_default_subdomain') | default(None, true) }}"
+    when: openshift_master_default_subdomain is not defined
   - stat:
       path: "{{ openshift_generated_configs_dir }}"
     register: openshift_generated_configs_dir_stat
@@ -133,7 +141,9 @@
   hosts: oo_etcd_to_config
   tasks:
   - name: restart etcd
-    service: name=etcd state=restarted
+    service:
+      name: "{{ 'etcd' if not openshift.common.is_containerized | bool else 'etcd_container' }}"
+      state: restarted

 - name: Stop master services
   hosts: oo_masters_to_config
diff --git a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
new file mode 100644
index 000000000..e8a20aa2b
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml
@@ -0,0 +1,69 @@
+---
+- name: Create local temp directory for syncing certs
+  hosts: localhost
+  connection: local
+  become: no
+  gather_facts: no
+  tasks:
+  - name: Create local temp directory for syncing certs
+    local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
+    register: local_cert_sync_tmpdir
+    changed_when: false
+
+- name: Create service signer certificate
+  hosts: oo_first_master
+  tasks:
+  - name: Create remote temp directory for creating certs
+    command: mktemp -d /tmp/openshift-ansible-XXXXXXX
+    register: remote_cert_create_tmpdir
+    changed_when: false
+
+  - name: Create service signer certificate
+    command: >
+      {{ openshift.common.admin_binary }} ca create-signer-cert
+      --cert=service-signer.crt
+      --key=service-signer.key
+      --name=openshift-service-serving-signer
+      --serial=service-signer.serial.txt
+    args:
+      chdir: "{{ remote_cert_create_tmpdir.stdout }}/"
+
+  - name: Retrieve service signer certificate
+    fetch:
+      src: "{{ remote_cert_create_tmpdir.stdout }}/{{ item }}"
+      dest: "{{ hostvars.localhost.local_cert_sync_tmpdir.stdout }}/"
+      flat: yes
+      fail_on_missing: yes
+      validate_checksum: yes
+    with_items:
+    - "service-signer.crt"
+    - "service-signer.key"
+
+  - name: Delete remote temp directory
+    file:
+      name: "{{ remote_cert_create_tmpdir.stdout }}"
+      state: absent
+    changed_when: false
+
+- name: Deploy service signer certificate
+  hosts: oo_masters_to_config
+  tasks:
+  - name: Deploy service signer certificate
+    copy:
+      src: "{{ hostvars.localhost.local_cert_sync_tmpdir.stdout }}/{{ item }}"
+      dest: "{{ openshift.common.config_base }}/master/"
+    with_items:
+    - "service-signer.crt"
+    - "service-signer.key"
+
+- name: Delete local temp directory
+  hosts: localhost
+  connection: local
+  become: no
+  gather_facts: no
+  tasks:
+  - name: Delete local temp directory
+    file:
+      name: "{{ local_cert_sync_tmpdir.stdout }}"
+      state: absent
+    changed_when: false
diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
index 03e7b844c..417096dd0 100644
--- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade.yml
@@ -39,6 +39,10 @@

   - service: name=docker state=started

+  - name: Update docker facts
+    openshift_facts:
+      role: docker
+
   - name: Restart containerized services
     service: name={{ item }} state=started
     with_items:
diff --git a/playbooks/common/openshift-cluster/upgrades/post.yml b/playbooks/common/openshift-cluster/upgrades/post.yml
index bd97d0b34..e43954453 100644
--- a/playbooks/common/openshift-cluster/upgrades/post.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post.yml
@@ -56,3 +56,17 @@
       {{ oc_cmd }} patch dc/docker-registry -n default -p
       '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
       --api-version=v1
+
+# Check for warnings to be printed at the end of the upgrade:
+- name: Check for warnings
+  hosts: oo_masters_to_config
+  tasks:
+  # Check if any masters are using pluginOrderOverride and warn if so, only for 1.3/3.3 and beyond:
+  - command: >
+      grep pluginOrderOverride {{ openshift.common.config_base }}/master/master-config.yaml
+    register: grep_plugin_order_override
+    when: openshift.common.version_gte_3_3_or_1_3 | bool
+    failed_when: false
+  - name: Warn if pluginOrderOverride is in use in master-config.yaml
+    debug: msg="WARNING pluginOrderOverride is being deprecated in master-config.yaml, please see https://docs.openshift.com/enterprise/latest/architecture/additional_concepts/admission_controllers.html for more information."
+    when: not grep_plugin_order_override | skipped and grep_plugin_order_override.rc == 0
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/upgrade.yml
index 3ec47d6f3..ba4fc63be 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade.yml
@@ -34,7 +34,7 @@
 ###############################################################################
 # Upgrade Masters
 ###############################################################################
-- name: Upgrade master
+- name: Upgrade master packages
   hosts: oo_masters_to_config
   handlers:
   - include: ../../../../roles/openshift_master/handlers/main.yml
@@ -45,6 +45,28 @@
   - include: rpm_upgrade.yml component=master
     when: not openshift.common.is_containerized | bool

+- name: Determine if service signer cert must be created
+  hosts: oo_first_master
+  tasks:
+  - name: Determine if service signer certificate must be created
+    stat:
+      path: "{{ openshift.common.config_base }}/master/service-signer.crt"
+    register: service_signer_cert_stat
+    changed_when: false
+
+# Create service signer cert when missing. Service signer certificate
+# is added to master config in the master config hook for v3_3.
+- include: create_service_signer_cert.yml
+  when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool)
+
+- name: Upgrade master config and systemd units
+  hosts: oo_masters_to_config
+  handlers:
+  - include: ../../../../roles/openshift_master/handlers/main.yml
+    static: yes
+  roles:
+  - openshift_facts
+  tasks:
   - include: "{{ master_config_hook }}"
     when: master_config_hook is defined
@@ -110,6 +132,52 @@
   when: master_update_failed | length > 0

 ###############################################################################
+# Reconcile Cluster Roles, Cluster Role Bindings and Security Context Constraints
+###############################################################################
+
+- name: Reconcile Cluster Roles and Cluster Role Bindings and Security Context Constraints
+  hosts: oo_masters_to_config
+  roles:
+  - { role: openshift_cli }
+  vars:
+    origin_reconcile_bindings: "{{ deployment_type == 'origin' and openshift_version | version_compare('1.0.6', '>') }}"
+    ent_reconcile_bindings: true
+    openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
+    # Similar to pre.yml, we don't want to upgrade docker during the openshift_cli role,
+    # it will be updated when we perform node upgrade.
+    docker_protect_installed_version: True
+  tasks:
+  - name: Verifying the correct commandline tools are available
+    shell: grep {{ verify_upgrade_version }} {{ openshift.common.admin_binary}}
+    when: openshift.common.is_containerized | bool and verify_upgrade_version is defined
+
+  - name: Reconcile Cluster Roles
+    command: >
+      {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      policy reconcile-cluster-roles --additive-only=true --confirm
+    run_once: true
+
+  - name: Reconcile Cluster Role Bindings
+    command: >
+      {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+      policy reconcile-cluster-role-bindings
+      --exclude-groups=system:authenticated
+      --exclude-groups=system:authenticated:oauth
+      --exclude-groups=system:unauthenticated
+      --exclude-users=system:anonymous
+      --additive-only=true --confirm
+    when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
+    run_once: true
+
+  - name: Reconcile Security Context Constraints
+    command: >
+      {{ openshift.common.admin_binary}} policy reconcile-sccs --confirm --additive-only=true
+    run_once: true
+
+  - set_fact:
+      reconcile_complete: True
+
+###############################################################################
 # Upgrade Nodes
 ###############################################################################

@@ -127,6 +195,18 @@
   # TODO: To better handle re-trying failed upgrades, it would be nice to check if the node
   # or docker actually needs an upgrade before proceeding. Perhaps best to save this until
   # we merge upgrade functionality into the base roles and a normal config.yml playbook run.
+  - name: Determine if node is currently schedulable
+    command: >
+      {{ openshift.common.client_binary }} get node {{ openshift.common.hostname | lower }} -o json
+    register: node_output
+    delegate_to: "{{ groups.oo_first_master.0 }}"
+    changed_when: false
+    when: inventory_hostname in groups.oo_nodes_to_config
+
+  - set_fact:
+      was_schedulable: "{{ 'unschedulable' not in (node_output.stdout | from_json).spec }}"
+    when: inventory_hostname in groups.oo_nodes_to_config
+
   - name: Mark unschedulable if host is a node
     command: >
       {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=false
@@ -142,7 +222,7 @@
   - include: docker/upgrade.yml
     when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
   - include: "{{ node_config_hook }}"
-    when: node_config_hook is defined
+    when: node_config_hook is defined and inventory_hostname in groups.oo_nodes_to_config

   - include: rpm_upgrade.yml
     vars:
@@ -153,55 +233,14 @@
   - include: containerized_node_upgrade.yml
     when: inventory_hostname in groups.oo_nodes_to_config and openshift.common.is_containerized | bool

+  - meta: flush_handlers
+
   - name: Set node schedulability
     command: >
       {{ openshift.common.admin_binary }} manage-node {{ openshift.common.hostname | lower }} --schedulable=true
     delegate_to: "{{ groups.oo_first_master.0 }}"
-    when: inventory_hostname in groups.oo_nodes_to_config and openshift.node.schedulable | bool
-
-
-###############################################################################
-# Reconcile Cluster Roles, Cluster Role Bindings and Security Context Constraints
-###############################################################################
-
-- name: Reconcile Cluster Roles and Cluster Role Bindings and Security Context Constraints
-  hosts: oo_masters_to_config
-  roles:
-  - { role: openshift_cli }
-  vars:
-    origin_reconcile_bindings: "{{ deployment_type == 'origin' and openshift_version | version_compare('1.0.6', '>') }}"
-    ent_reconcile_bindings: true
-    openshift_docker_hosted_registry_network: "{{ hostvars[groups.oo_first_master.0].openshift.common.portal_net }}"
-  tasks:
-  - name: Verifying the correct commandline tools are available
-    shell: grep {{ verify_upgrade_version }} {{ openshift.common.admin_binary}}
-    when: openshift.common.is_containerized | bool and verify_upgrade_version is defined
-
-  - name: Reconcile Cluster Roles
-    command: >
-      {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
-      policy reconcile-cluster-roles --additive-only=true --confirm
-    run_once: true
-
-  - name: Reconcile Cluster Role Bindings
-    command: >
-      {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
-      policy reconcile-cluster-role-bindings
-      --exclude-groups=system:authenticated
-      --exclude-groups=system:authenticated:oauth
-      --exclude-groups=system:unauthenticated
-      --exclude-users=system:anonymous
-      --additive-only=true --confirm
-    when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
-    run_once: true
+    when: inventory_hostname in groups.oo_nodes_to_config and was_schedulable | bool

-  - name: Reconcile Security Context Constraints
-    command: >
-      {{ openshift.common.admin_binary}} policy reconcile-sccs --confirm --additive-only=true
-    run_once: true
-
-  - set_fact:
-      reconcile_complete: True

 ##############################################################################
 # Gate on reconcile
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
index 638ef23a8..684eea343 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
@@ -16,7 +16,7 @@

 - modify_yaml:
     dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
-    yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.ops'
+    yaml_key: 'masterClients.externalKubernetesClientConnectionOverrides.qps'
     yaml_value: 200

 - modify_yaml:
@@ -36,5 +36,15 @@

 - modify_yaml:
     dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
-    yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.ops'
+    yaml_key: 'masterClients.openshiftLoopbackClientConnectionOverrides.qps'
     yaml_value: 300
+
+- modify_yaml:
+    dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+    yaml_key: 'controllerConfig.servicesServingCert.signer.certFile'
+    yaml_value: service-signer.crt
+
+- modify_yaml:
+    dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
+    yaml_key: 'controllerConfig.servicesServingCert.signer.keyFile'
+    yaml_value: service-signer.key
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml
index 1297938bc..8f64636ae 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/node_config_upgrade.yml
@@ -16,6 +16,6 @@

 - modify_yaml:
     dest: "{{ openshift.common.config_base}}/node/node-config.yaml"
-    yaml_key: 'masterClientConnectionOverrides.ops'
+    yaml_key: 'masterClientConnectionOverrides.qps'
     yaml_value: 20
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 1d818eea0..7f60cd9e4 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -48,6 +48,12 @@
   - set_fact:
       openshift_hosted_metrics_resolution: "{{ lookup('oo_option', 'openshift_hosted_metrics_resolution') | default('10s', true) }}"
     when: openshift_hosted_metrics_resolution is not defined
+  - set_fact:
+      openshift_hosted_metrics_deployer_prefix: "{{ lookup('oo_option', 'openshift_hosted_metrics_deployer_prefix') | default('openshift') }}"
+    when: openshift_hosted_metrics_deployer_prefix is not defined
+  - set_fact:
+      openshift_hosted_metrics_deployer_version: "{{ lookup('oo_option', 'openshift_hosted_metrics_deployer_version') | default('latest') }}"
+    when: openshift_hosted_metrics_deployer_prefix is not defined
   roles:
   - openshift_facts
   post_tasks:
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index c56353430..66eb293e5 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -43,7 +43,7 @@
       ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
       ansible_become: "{{ g_sudo | default(omit) }}"
     with_items: "{{ groups.oo_nodes_to_config | default([]) }}"
-    when: hostvars[item].openshift.common.is_containerized | bool and (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config)
+    when: hostvars[item].openshift.common is defined and hostvars[item].openshift.common.is_containerized | bool and (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config)

 - name: Configure node instances
   hosts: oo_containerized_master_nodes
@@ -156,10 +156,7 @@
 - name: Set schedulability
   hosts: oo_first_master
   vars:
-    openshift_nodes: "{{ hostvars
-                         | oo_select_keys(groups['oo_nodes_to_config'])
-                         | oo_collect('openshift.common.hostname') }}"
-    openshift_node_vars: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']) }}"
+    openshift_nodes: "{{ groups.oo_nodes_to_config | default([]) }}"
   pre_tasks:
   # Necessary because when you're on a node that's also a master the master will be
   # restarted after the node restarts docker and it will take up to 60 seconds for
diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml
index 3117d9edc..b42ca83af 100644
--- a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml
+++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_network.yml
@@ -1,27 +1,11 @@
 ---
-- name: Test if libvirt network for openshift already exists
-  command: "virsh -c {{ libvirt_uri }} net-info {{ libvirt_network }}"
-  register: net_info_result
-  changed_when: False
-  failed_when: "net_info_result.rc != 0 and 'no network with matching name' not in net_info_result.stderr"
-
-- name: Create a temp directory for the template xml file
-  command: "mktemp -d /tmp/openshift-ansible-XXXXXXX"
-  register: mktemp
-  when: net_info_result.rc == 1
-
-- name: Create network xml file
-  template:
-    src: templates/network.xml
-    dest: "{{ mktemp.stdout }}/network.xml"
-  when: net_info_result.rc == 1
-
-- name: Create libvirt network for openshift
-  command: "virsh -c {{ libvirt_uri }} net-create {{ mktemp.stdout }}/network.xml"
-  when: net_info_result.rc == 1
-
-- name: Remove the temp directory
-  file:
-    path: "{{ mktemp.stdout }}"
-    state: absent
-  when: net_info_result.rc == 1
+- name: Create the libvirt network for OpenShift
+  virt_net:
+    name: '{{ libvirt_network }}'
+    state: '{{ item }}'
+    autostart: 'yes'
+    xml: "{{ lookup('template', 'network.xml') }}"
+    uri: '{{ libvirt_uri }}'
+  with_items:
+  - present
+  - active
diff --git a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml
index 397158b9e..8685624ec 100644
--- a/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml
+++ b/playbooks/libvirt/openshift-cluster/tasks/configure_libvirt_storage_pool.yml
@@ -6,22 +6,25 @@

 # We need to set permissions on the directory and any items created under the directory, so we need to call the acl module with and without default set.
 - acl:
-    default: "{{ item }}"
+    default: '{{ item.default }}'
     entity: kvm
     etype: group
     name: "{{ libvirt_storage_pool_path }}"
-    permissions: rwx
+    permissions: '{{ item.permissions }}'
     state: present
   with_items:
-  - no
-  - yes
+  - default: no
+    permissions: x
+  - default: yes
+    permissions: rwx

-- name: Test if libvirt storage pool for openshift already exists
-  command: "virsh -c {{ libvirt_uri }} pool-info {{ libvirt_storage_pool }}"
-  register: pool_info_result
-  changed_when: False
-  failed_when: "pool_info_result.rc != 0 and 'no storage pool with matching name' not in pool_info_result.stderr"
-
-- name: Create the libvirt storage pool for openshift
-  command: 'virsh -c {{ libvirt_uri }} pool-create-as {{ libvirt_storage_pool }} dir --target {{ libvirt_storage_pool_path }}'
-  when: pool_info_result.rc == 1
+- name: Create the libvirt storage pool for OpenShift
+  virt_pool:
+    name: '{{ libvirt_storage_pool }}'
+    state: '{{ item }}'
+    autostart: 'yes'
+    xml: "{{ lookup('template', 'storage-pool.xml') }}"
+    uri: '{{ libvirt_uri }}'
+  with_items:
+  - present
+  - active
diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
index cc34d0ef9..e0afc43ba 100644
--- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
@@ -134,4 +134,4 @@
     delay: 1
   with_together:
   - '{{ instances }}'
-  - '{{ ips }}'
\ No newline at end of file
+  - '{{ ips }}'
diff --git a/playbooks/libvirt/openshift-cluster/templates/network.xml b/playbooks/libvirt/openshift-cluster/templates/network.xml
index 050bc7ab9..0ce2a8342 100644
--- a/playbooks/libvirt/openshift-cluster/templates/network.xml
+++ b/playbooks/libvirt/openshift-cluster/templates/network.xml
@@ -1,5 +1,5 @@
 <network>
-  <name>openshift-ansible</name>
+  <name>{{ libvirt_network }}</name>
   <forward mode='nat'>
     <nat>
       <port start='1024' end='65535'/>
diff --git a/playbooks/libvirt/openshift-cluster/templates/storage-pool.xml b/playbooks/libvirt/openshift-cluster/templates/storage-pool.xml
new file mode 100644
index 000000000..da139afd0
--- /dev/null
+++ b/playbooks/libvirt/openshift-cluster/templates/storage-pool.xml
@@ -0,0 +1,6 @@
+<pool type='dir'>
+  <name>{{ libvirt_storage_pool }}</name>
+  <target>
+    <path>{{ libvirt_storage_pool_path }}</path>
+  </target>
+</pool>
diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
index 2d0098784..458cf5ac7 100644
--- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
+++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml
@@ -42,6 +42,12 @@ parameters:
     description: Source of legitimate ssh connections
     default: 0.0.0.0/0

+  node_port_incoming:
+    type: string
+    label: Source of node port connections
+    description: Authorized sources targeting node ports
+    default: 0.0.0.0/0
+
   num_etcd:
     type: number
     label: Number of etcd nodes
@@ -393,6 +399,11 @@ resources:
           port_range_min: 4789
           port_range_max: 4789
           remote_mode: remote_group_id
+        - direction: ingress
+          protocol: tcp
+          port_range_min: 30000
+          port_range_max: 32767
+          remote_ip_prefix: { get_param: node_port_incoming }

   infra-secgrp:
     type: OS::Neutron::SecurityGroup
diff --git a/playbooks/openstack/openshift-cluster/launch.yml b/playbooks/openstack/openshift-cluster/launch.yml
index b9aae2f4c..5cf543204 100644
--- a/playbooks/openstack/openshift-cluster/launch.yml
+++ b/playbooks/openstack/openshift-cluster/launch.yml
@@ -33,6 +33,7 @@
              -P external_net={{ openstack_network_external_net }}
              -P ssh_public_key="{{ openstack_ssh_public_key }}"
              -P ssh_incoming={{ openstack_ssh_access_from }}
+             -P node_port_incoming={{ openstack_node_port_access_from }}
              -P num_etcd={{ num_etcd }}
              -P num_masters={{ num_masters }}
              -P num_nodes={{ num_nodes }}
@@ -48,6 +49,8 @@
              -P infra_flavor={{ openstack_flavor["infra"] }}
              -P dns_flavor={{ openstack_flavor["dns"] }}
              openshift-ansible-{{ cluster_id }}-stack'
+    args:
+      chdir: '{{ playbook_dir }}'

   - name: Wait for OpenStack Stack readiness
     shell: 'heat stack-show openshift-ansible-{{ cluster_id }}-stack | awk ''$2 == "stack_status" {print $4}'''
@@ -107,9 +110,9 @@
       openshift_node_labels:
         type: "etcd"
     with_together:
-    - parsed_outputs.etcd_names
-    - parsed_outputs.etcd_ips
-    - parsed_outputs.etcd_floating_ips
+    - '{{ parsed_outputs.etcd_names }}'
+    - '{{ parsed_outputs.etcd_ips }}'
+    - '{{ parsed_outputs.etcd_floating_ips }}'

   - name: Add new master instances groups and variables
     add_host:
@@ -121,9 +124,9 @@
       openshift_node_labels:
         type: "master"
     with_together:
-    - parsed_outputs.master_names
-    - parsed_outputs.master_ips
-    - parsed_outputs.master_floating_ips
+    - '{{ parsed_outputs.master_names }}'
+    - '{{ parsed_outputs.master_ips }}'
+    - '{{ parsed_outputs.master_floating_ips }}'

   - name: Add new node instances groups and variables
     add_host:
@@ -135,9 +138,9 @@
       openshift_node_labels:
         type: "compute"
     with_together:
-    - parsed_outputs.node_names
-    - parsed_outputs.node_ips
-    - parsed_outputs.node_floating_ips
+    - '{{ parsed_outputs.node_names }}'
+    - '{{ parsed_outputs.node_ips }}'
+    - '{{ parsed_outputs.node_floating_ips }}'

   - name: Add new infra instances groups and variables
     add_host:
@@ -149,9 +152,9 @@
       openshift_node_labels:
         type: "infra"
     with_together:
-    - parsed_outputs.infra_names
-    - parsed_outputs.infra_ips
-    - parsed_outputs.infra_floating_ips
+    - '{{ parsed_outputs.infra_names }}'
+    - '{{ parsed_outputs.infra_ips }}'
+    - '{{ parsed_outputs.infra_floating_ips }}'

   - name: Add DNS groups and variables
     add_host:
@@ -166,10 +169,10 @@
       host: '{{ item }}'
       port: 22
     with_flattened:
-    - parsed_outputs.master_floating_ips
-    - parsed_outputs.node_floating_ips
-    - parsed_outputs.infra_floating_ips
-    - parsed_outputs.dns_floating_ip
+    - '{{ parsed_outputs.master_floating_ips }}'
+    - '{{ parsed_outputs.node_floating_ips }}'
+    - '{{ parsed_outputs.infra_floating_ips }}'
+    - '{{ parsed_outputs.dns_floating_ip }}'

   - name: Wait for user setup
     command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ deployment_vars[deployment_type].ssh_user }}@{{ item }} echo {{ deployment_vars[deployment_type].ssh_user }} user is setup'
@@ -178,10 +181,10 @@
     retries: 30
     delay: 1
     with_flattened:
-    - parsed_outputs.master_floating_ips
-    - parsed_outputs.node_floating_ips
-    - parsed_outputs.infra_floating_ips
-    - parsed_outputs.dns_floating_ip
+    - '{{ parsed_outputs.master_floating_ips }}'
+    - '{{ parsed_outputs.node_floating_ips }}'
+    - '{{ parsed_outputs.infra_floating_ips }}'
+    - '{{ parsed_outputs.dns_floating_ip }}'

 - include: update.yml
diff --git a/playbooks/openstack/openshift-cluster/list.yml b/playbooks/openstack/openshift-cluster/list.yml
index ba9c6bf9c..60372e262 100644
--- a/playbooks/openstack/openshift-cluster/list.yml
+++ b/playbooks/openstack/openshift-cluster/list.yml
@@ -17,7 +17,7 @@
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_ssh_host: "{{ hostvars[item].ansible_ssh_host | default(item) }}"
       ansible_become: "{{ deployment_vars[deployment_type].become }}"
-    with_items: groups[scratch_group] | default([]) | difference(['localhost'])
+    with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}"

 - name: List Hosts
   hosts: oo_list_hosts
diff --git a/playbooks/openstack/openshift-cluster/terminate.yml b/playbooks/openstack/openshift-cluster/terminate.yml
index 5bd8476f1..980ab7337 100644
--- a/playbooks/openstack/openshift-cluster/terminate.yml
+++ b/playbooks/openstack/openshift-cluster/terminate.yml
@@ -11,7 +11,7 @@
       groups: oo_hosts_to_terminate
       ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
       ansible_become: "{{ deployment_vars[deployment_type].become }}"
-    with_items: (groups['tag_environment_' ~ cluster_env]|default([])) | intersect(groups['tag_clusterid_' ~ cluster_id ]|default([]))
+    with_items: "{{ (groups['tag_environment_' ~ cluster_env]|default([])) | intersect(groups['tag_clusterid_' ~ cluster_id ]|default([])) }}"

 - name: Unsubscribe VMs
   hosts: oo_hosts_to_terminate
diff --git a/playbooks/openstack/openshift-cluster/vars.yml b/playbooks/openstack/openshift-cluster/vars.yml
index bc53a51b0..17063ef34 100644
--- a/playbooks/openstack/openshift-cluster/vars.yml
+++ b/playbooks/openstack/openshift-cluster/vars.yml
@@ -12,6 +12,8 @@ openstack_ssh_public_key:     "{{ lookup('file', lookup('oo_option', 'public_k
                                     default('~/.ssh/id_rsa.pub', True)) }}"
 openstack_ssh_access_from:    "{{ lookup('oo_option', 'ssh_from') |
                                   default('0.0.0.0/0', True) }}"
+openstack_node_port_access_from: "{{ lookup('oo_option', 'node_port_from') |
+                                     default('0.0.0.0/0', True) }}"
 openstack_flavor:
   dns:    "{{ lookup('oo_option', 'dns_flavor'         ) | default('m1.small', True) }}"
   etcd:   "{{ lookup('oo_option', 'etcd_flavor'        ) | default('m1.small', True) }}"