Diffstat (limited to 'playbooks')
39 files changed, 79 insertions, 208 deletions
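Most of the hunks below make one mechanical change: bare variable references used as loop sources for with_items / with_together become fully templated "{{ ... }}" expressions, since bare-variable loops are deprecated in the Ansible 2.x series. A minimal before/after sketch of the pattern, reusing variable and group names from the hunks themselves:

# Deprecated: bare variable as the loop source
- add_host:
    name: "{{ item }}"
    groups: l_oo_all_hosts
  with_items: g_all_hosts

# Preferred: fully templated loop expression
- add_host:
    name: "{{ item }}"
    groups: l_oo_all_hosts
  with_items: "{{ g_all_hosts | default([]) }}"

# The same applies to with_together, which zips two lists and
# exposes each pair as item.0 / item.1
- ec2_tag:
    resource: "{{ item.1.id }}"
    region: "{{ deployment_vars[deployment_type].region }}"
    state: present
    tags:
      Name: "{{ item.0 }}"
  with_together:
  - "{{ instances }}"
  - "{{ ec2.instances }}"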
| diff --git a/playbooks/adhoc/noc/get_zabbix_problems.yml b/playbooks/adhoc/noc/get_zabbix_problems.yml index 79cae24ab..32fc7ce68 100644 --- a/playbooks/adhoc/noc/get_zabbix_problems.yml +++ b/playbooks/adhoc/noc/get_zabbix_problems.yml @@ -33,7 +33,7 @@      - add_host:          name: "{{ item }}"          groups: problem_hosts_group -      with_items: problem_hosts +      with_items: "{{ problem_hosts }}"  - name: "Run on problem hosts"    hosts: problem_hosts_group diff --git a/playbooks/aws/openshift-cluster/list.yml b/playbooks/aws/openshift-cluster/list.yml index a542b4ca3..4934ae6d0 100644 --- a/playbooks/aws/openshift-cluster/list.yml +++ b/playbooks/aws/openshift-cluster/list.yml @@ -16,7 +16,7 @@        groups: oo_list_hosts        ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"        ansible_become: "{{ deployment_vars[deployment_type].become }}" -    with_items: groups[scratch_group] | default([]) | difference(['localhost']) +    with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}"  - name: List Hosts    hosts: oo_list_hosts diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml index d22c86cda..4d76d3bfe 100644 --- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml @@ -99,8 +99,8 @@  - name: Add Name tag to instances    ec2_tag: resource={{ item.1.id }} region={{ deployment_vars[deployment_type].region }} state=present    with_together: -  - instances -  - ec2.instances +  - "{{ instances }}" +  - "{{ ec2.instances }}"    args:      tags:        Name: "{{ item.0 }}" @@ -154,8 +154,8 @@      openshift_node_labels: "{{ node_label }}"      logrotate_scripts: "{{ logrotate }}"    with_together: -  - instances -  - ec2.instances +  - "{{ instances }}" +  - "{{ ec2.instances }}"  - name: Add new instances to nodes_to_add group if needed    add_host: @@ -169,13 +169,13 @@      openshift_node_labels: "{{ node_label }}"      logrotate_scripts: "{{ logrotate }}"    with_together: -  - instances -  - ec2.instances +  - "{{ instances }}" +  - "{{ ec2.instances }}"    when: oo_extend_env is defined and oo_extend_env | bool  - name: Wait for ssh    wait_for: "port=22 host={{ item.dns_name }}" -  with_items: ec2.instances +  with_items: "{{ ec2.instances }}"  - name: Wait for user setup    command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.0].ansible_ssh_user }}@{{ item.1.dns_name }} echo {{ hostvars[item.0].ansible_ssh_user }} user is setup" @@ -184,5 +184,5 @@    retries: 20    delay: 10    with_together: -  - instances -  - ec2.instances +  - "{{ instances }}" +  - "{{ ec2.instances }}" diff --git a/playbooks/aws/openshift-cluster/terminate.yml b/playbooks/aws/openshift-cluster/terminate.yml index fb13e1839..7a8375d0e 100644 --- a/playbooks/aws/openshift-cluster/terminate.yml +++ b/playbooks/aws/openshift-cluster/terminate.yml @@ -12,7 +12,7 @@        groups: oo_hosts_to_terminate        ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"        ansible_become: "{{ deployment_vars[deployment_type].become }}" -    with_items: (groups['tag_clusterid_' ~ cluster_id] | default([])) | difference(['localhost']) +    with_items: "{{ (groups['tag_clusterid_' ~ cluster_id] | default([])) | difference(['localhost']) }}"  - name: Unsubscribe VMs    hosts: oo_hosts_to_terminate @@ -39,7 +39,7 
@@            clusterid:     "{{ hostvars[item]['ec2_tag_clusterid'] }}"            host-type:     "{{ hostvars[item]['ec2_tag_host-type'] }}"            sub_host_type: "{{ hostvars[item]['ec2_tag_sub-host-type'] }}" -      with_items: groups.oo_hosts_to_terminate +      with_items: "{{ groups.oo_hosts_to_terminate }}"        when: "'oo_hosts_to_terminate' in groups"      - name: Terminate instances @@ -49,7 +49,7 @@          region: "{{ hostvars[item].ec2_region }}"        ignore_errors: yes        register: ec2_term -      with_items: groups.oo_hosts_to_terminate +      with_items: "{{ groups.oo_hosts_to_terminate }}"        when: "'oo_hosts_to_terminate' in groups"      # Fail if any of the instances failed to terminate with an error other @@ -57,7 +57,7 @@      - fail:          msg: "Terminating instance {{ item.ec2_id }} failed with message {{ item.msg }}"        when: "'oo_hosts_to_terminate' in groups and item.has_key('failed') and item.failed" -      with_items: ec2_term.results +      with_items: "{{ ec2_term.results }}"      - name: Stop instance if termination failed        ec2: @@ -66,12 +66,12 @@          region: "{{ item.item.ec2_region }}"        register: ec2_stop        when: "'oo_hosts_to_terminate' in groups and item.has_key('failed') and item.failed" -      with_items: ec2_term.results +      with_items: "{{ ec2_term.results }}"      - name: Rename stopped instances        ec2_tag: resource={{ item.item.item.ec2_id }} region={{ item.item.item.ec2_region }} state=present        args:          tags:            Name: "{{ item.item.item.ec2_tag_Name }}-terminate" -      with_items: ec2_stop.results +      with_items: "{{ ec2_stop.results }}"        when: ec2_stop | changed diff --git a/playbooks/aws/openshift-cluster/update.yml b/playbooks/aws/openshift-cluster/update.yml index d762203b2..ed05d61ed 100644 --- a/playbooks/aws/openshift-cluster/update.yml +++ b/playbooks/aws/openshift-cluster/update.yml @@ -7,7 +7,7 @@    - add_host:        name: "{{ item }}"        groups: l_oo_all_hosts -    with_items: g_all_hosts +    with_items: "{{ g_all_hosts }}"  - hosts: l_oo_all_hosts    gather_facts: no @@ -27,7 +27,7 @@        groups: oo_hosts_to_update        ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"        ansible_become: "{{ deployment_vars[deployment_type].become }}" -    with_items: g_all_hosts | default([]) +    with_items: "{{ g_all_hosts | default([]) }}"  - include: ../../common/openshift-cluster/update_repos_and_packages.yml diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml index 9be6becc1..834461e14 100644 --- a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml @@ -25,13 +25,13 @@    tasks:    - name: Prepare for Node evacuation      command: > -      {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --schedulable=false +      {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable=false      delegate_to: "{{ groups.oo_first_master.0 }}"      when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade    - name: Evacuate Node for Kubelet upgrade      command: > -      {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --evacuate --force +      {{ openshift.common.client_binary }} adm manage-node {{ 
openshift.node.nodename }} --evacuate --force      delegate_to: "{{ groups.oo_first_master.0 }}"      when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade @@ -40,7 +40,7 @@    - name: Set node schedulability      command: > -      {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --schedulable=true +      {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --schedulable=true      delegate_to: "{{ groups.oo_first_master.0 }}"      when: openshift.node.schedulable | bool      when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade and openshift.node.schedulable | bool diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml index 1755203a4..a3ab78ccf 100644 --- a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml @@ -10,7 +10,7 @@    - add_host:        name: "{{ item }}"        groups: l_oo_all_hosts -    with_items: g_all_hosts | default([]) +    with_items: "{{ g_all_hosts | default([]) }}"      changed_when: false  - hosts: l_oo_all_hosts diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_2/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_2/upgrade.yml index 5d549eee7..d92761e48 100644 --- a/playbooks/byo/openshift-cluster/upgrades/v3_2/upgrade.yml +++ b/playbooks/byo/openshift-cluster/upgrades/v3_2/upgrade.yml @@ -10,7 +10,7 @@    - add_host:        name: "{{ item }}"        groups: l_oo_all_hosts -    with_items: g_all_hosts | default([]) +    with_items: "{{ g_all_hosts | default([]) }}"  - hosts: l_oo_all_hosts    gather_facts: no diff --git a/playbooks/byo/openshift-master/restart.yml b/playbooks/byo/openshift-master/restart.yml index 0cf669ae3..0a163526a 100644 --- a/playbooks/byo/openshift-master/restart.yml +++ b/playbooks/byo/openshift-master/restart.yml @@ -8,7 +8,7 @@    - add_host:        name: "{{ item }}"        groups: l_oo_all_hosts -    with_items: g_all_hosts +    with_items: "{{ g_all_hosts }}"  - hosts: l_oo_all_hosts    gather_facts: no diff --git a/playbooks/byo/openshift-master/scaleup.yml b/playbooks/byo/openshift-master/scaleup.yml index fced79262..279eeab21 100644 --- a/playbooks/byo/openshift-master/scaleup.yml +++ b/playbooks/byo/openshift-master/scaleup.yml @@ -8,7 +8,7 @@    - add_host:        name: "{{ item }}"        groups: l_oo_all_hosts -    with_items: g_all_hosts +    with_items: "{{ g_all_hosts }}"  - hosts: l_oo_all_hosts    gather_facts: no diff --git a/playbooks/byo/openshift-node/scaleup.yml b/playbooks/byo/openshift-node/scaleup.yml index 5737bb0e0..902221931 100644 --- a/playbooks/byo/openshift-node/scaleup.yml +++ b/playbooks/byo/openshift-node/scaleup.yml @@ -8,7 +8,7 @@    - add_host:        name: "{{ item }}"        groups: l_oo_all_hosts -    with_items: g_all_hosts +    with_items: "{{ g_all_hosts }}"  - hosts: l_oo_all_hosts    gather_facts: no diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml index f093411ef..f36caeb36 100644 --- a/playbooks/byo/rhel_subscribe.yml +++ b/playbooks/byo/rhel_subscribe.yml @@ -8,7 +8,7 @@    - add_host:        name: "{{ item }}"        groups: l_oo_all_hosts -    with_items: g_all_hosts +    with_items: "{{ g_all_hosts }}"  - hosts: l_oo_all_hosts    gather_facts: no diff --git a/playbooks/common/openshift-cluster/additional_config.yml 
b/playbooks/common/openshift-cluster/additional_config.yml index 26b31d313..825f46415 100644 --- a/playbooks/common/openshift-cluster/additional_config.yml +++ b/playbooks/common/openshift-cluster/additional_config.yml @@ -11,6 +11,8 @@    - role: openshift_examples      registry_url: "{{ openshift.master.registry_url }}"      when: openshift.common.install_examples | bool +  - role: openshift_hosted_templates +    registry_url: "{{ openshift.master.registry_url }}"    - role: openshift_manageiq      when: openshift.common.use_manageiq | bool    - role: cockpit diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml index 3cc23f9c1..352d266a5 100644 --- a/playbooks/common/openshift-cluster/openshift_hosted.yml +++ b/playbooks/common/openshift-cluster/openshift_hosted.yml @@ -56,13 +56,13 @@      openshift_hosted_logging_ops_hostname: "{{ logging_ops_hostname }}"      openshift_hosted_logging_master_public_url: "{{ logging_master_public_url }}"      openshift_hosted_logging_elasticsearch_cluster_size: "{{ logging_elasticsearch_cluster_size }}" -    openshift_hosted_logging_elasticsearch_pvc_dynamic: "{{ 'true' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else 'false' }}" -    openshift_hosted_logging_elasticsearch_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else ''  }}" -    openshift_hosted_logging_elasticsearch_pvc_prefix: "{{ 'logging-es' if openshift_hosted_logging_storage_kind | default(none) is not none else '' }}" +    openshift_hosted_logging_elasticsearch_pvc_dynamic: "{{ 'true' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}" +    openshift_hosted_logging_elasticsearch_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift_hosted_logging_storage_kind | default(none) in ['dynamic','nfs'] else ''  }}" +    openshift_hosted_logging_elasticsearch_pvc_prefix: "{{ 'logging-es' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}"      openshift_hosted_logging_elasticsearch_ops_cluster_size: "{{ logging_elasticsearch_ops_cluster_size }}" -    openshift_hosted_logging_elasticsearch_ops_pvc_dynamic: "{{ 'true' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else 'false' }}" -    openshift_hosted_logging_elasticsearch_ops_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else ''  }}" -    openshift_hosted_logging_elasticsearch_ops_pvc_prefix: "{{ 'logging-es' if openshift_hosted_logging_storage_kind | default(none) is not none else '' }}" +    openshift_hosted_logging_elasticsearch_ops_pvc_dynamic: "{{ 'true' if openshift_hosted_logging_storage_kind | default(none) == 'dynamic' else '' }}" +    openshift_hosted_logging_elasticsearch_ops_pvc_size: "{{ openshift.hosted.logging.storage.volume.size if openshift_hosted_logging_storage_kind | default(none) in ['dynamic','nfs' ] else ''  }}" +    openshift_hosted_logging_elasticsearch_ops_pvc_prefix: "{{ 'logging-es' if openshift_hosted_logging_storage_kind | default(none) =='dynamic' else '' }}"    - role: cockpit-ui      when: ( openshift.common.version_gte_3_3_or_1_3  | bool ) and ( openshift_hosted_manage_registry | default(true) | bool ) diff --git a/playbooks/common/openshift-cluster/redeploy-certificates.yml b/playbooks/common/openshift-cluster/redeploy-certificates.yml index 
4996c56a7..5f008a045 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates.yml @@ -224,7 +224,7 @@    - name: Prepare for node evacuation      command: > -      {{ openshift.common.admin_binary }} --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig +      {{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig        manage-node {{ openshift.node.nodename }}        --schedulable=false      delegate_to: "{{ groups.oo_first_master.0 }}" @@ -232,7 +232,7 @@    - name: Evacuate node      command: > -      {{ openshift.common.admin_binary }} --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig +      {{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig        manage-node {{ openshift.node.nodename }}        --evacuate --force      delegate_to: "{{ groups.oo_first_master.0 }}" @@ -240,7 +240,7 @@    - name: Set node schedulability      command: > -      {{ openshift.common.admin_binary }} --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig +      {{ openshift.common.client_binary }} adm --config={{ hostvars[groups.oo_first_master.0].mktemp.stdout }}/admin.kubeconfig        manage-node {{ openshift.node.nodename }} --schedulable=true      delegate_to: "{{ groups.oo_first_master.0 }}"      when: openshift_certificates_redeploy_ca | default(false) | bool and was_schedulable | bool diff --git a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml index 32a3636aa..439df5ffd 100644 --- a/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/containerized_node_upgrade.yml @@ -1,5 +1,3 @@ -- include_vars: ../../../../roles/openshift_node/vars/main.yml -  - name: Update systemd units    include: ../../../../roles/openshift_node/tasks/systemd_units.yml openshift_version={{ openshift_image_tag }} diff --git a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml index 78f6c46f3..23cf8cf76 100644 --- a/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml +++ b/playbooks/common/openshift-cluster/upgrades/create_service_signer_cert.yml @@ -22,11 +22,11 @@    - name: Create service signer certificate      command: > -      {{ openshift.common.admin_binary }} ca create-signer-cert -      --cert=service-signer.crt -      --key=service-signer.key -      --name=openshift-service-serving-signer -      --serial=service-signer.serial.txt +      {{ openshift.common.client_binary }} adm ca create-signer-cert +      --cert="{{ remote_cert_create_tmpdir.stdout }}/"service-signer.crt +      --key="{{ remote_cert_create_tmpdir.stdout }}/"service-signer.key +      --name="{{ remote_cert_create_tmpdir.stdout }}/"openshift-service-serving-signer +      --serial="{{ remote_cert_create_tmpdir.stdout }}/"service-signer.serial.txt      args:        chdir: "{{ remote_cert_create_tmpdir.stdout }}/"      when: not (hostvars[groups.oo_first_master.0].service_signer_cert_stat.stat.exists | bool) diff --git a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml index 
46ff421fd..ee75aa853 100644 --- a/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml +++ b/playbooks/common/openshift-cluster/upgrades/docker/upgrade_check.yml @@ -36,7 +36,7 @@  - set_fact:      l_docker_upgrade: False -# Make sure a docker_verison is set if none was requested: +# Make sure a docker_version is set if none was requested:  - set_fact:      docker_version: "{{ avail_docker_version.stdout }}"    when: pkg_check.rc == 0 and docker_version is not defined diff --git a/playbooks/common/openshift-cluster/upgrades/init.yml b/playbooks/common/openshift-cluster/upgrades/init.yml index f3b3abe0d..fbdb7900a 100644 --- a/playbooks/common/openshift-cluster/upgrades/init.yml +++ b/playbooks/common/openshift-cluster/upgrades/init.yml @@ -10,7 +10,7 @@    - add_host:        name: "{{ item }}"        groups: l_oo_all_hosts -    with_items: g_all_hosts | default([]) +    with_items: "{{ g_all_hosts | default([]) }}"  - hosts: l_oo_all_hosts    gather_facts: no diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml index e43954453..2bbcbe1f8 100644 --- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml @@ -17,10 +17,14 @@    # not already exist. We could have potentially done a replace --force to    # create and update in one step.    - openshift_examples +  - openshift_hosted_templates    # Update the existing templates    - role: openshift_examples      registry_url: "{{ openshift.master.registry_url }}"      openshift_examples_import_command: replace +  - role: openshift_hosted_templates +    registry_url: "{{ openshift.master.registry_url }}" +    openshift_hosted_templates_import_command: replace    pre_tasks:    - name: Collect all routers      command: > @@ -41,7 +45,7 @@        {{ oc_cmd }} patch dc/{{ item['labels']['deploymentconfig'] }} -n {{ item['namespace'] }} -p        '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}","livenessProbe":{"tcpSocket":null,"httpGet":{"path": "/healthz", "port": 1936, "host": "localhost", "scheme": "HTTP"},"initialDelaySeconds":10,"timeoutSeconds":1}}]}}}}'        --api-version=v1 -    with_items: haproxy_routers +    with_items: "{{ haproxy_routers }}"    - name: Check for default registry      command: > diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml index c80e9e74d..927d9b4ca 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -11,7 +11,7 @@      add_host:        name: "{{ item }}"        groups: etcd_hosts_to_backup -    with_items: groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master +    with_items: "{{ groups.oo_etcd_to_config if groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config | length > 0 else groups.oo_first_master }}"  - name: Backup etcd    hosts: etcd_hosts_to_backup @@ -197,19 +197,15 @@      # restart.      
skip_docker_role: True    tasks: -  - name: Verifying the correct commandline tools are available -    shell: grep {{ verify_upgrade_version }} {{ openshift.common.admin_binary}} -    when: openshift.common.is_containerized | bool and verify_upgrade_version is defined -    - name: Reconcile Cluster Roles      command: > -      {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig +      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig        policy reconcile-cluster-roles --additive-only=true --confirm      run_once: true    - name: Reconcile Cluster Role Bindings      command: > -      {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig +      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig        policy reconcile-cluster-role-bindings        --exclude-groups=system:authenticated        --exclude-groups=system:authenticated:oauth @@ -221,7 +217,7 @@    - name: Reconcile Security Context Constraints      command: > -      {{ openshift.common.admin_binary}} policy reconcile-sccs --confirm --additive-only=true +      {{ openshift.common.client_binary }} adm policy reconcile-sccs --confirm --additive-only=true      run_once: true    - set_fact: diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml index 9b572dcdf..1f314c854 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml @@ -29,7 +29,7 @@    - name: Mark unschedulable if host is a node      command: > -      {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --schedulable=false +      {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=false      delegate_to: "{{ groups.oo_first_master.0 }}"      when: inventory_hostname in groups.oo_nodes_to_upgrade      # NOTE: There is a transient "object has been modified" error here, allow a couple @@ -41,7 +41,7 @@    - name: Evacuate Node for Kubelet upgrade      command: > -      {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --evacuate --force +      {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --evacuate --force      delegate_to: "{{ groups.oo_first_master.0 }}"      when: inventory_hostname in groups.oo_nodes_to_upgrade    tasks: @@ -64,7 +64,7 @@    - name: Set node schedulability      command: > -      {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename | lower }} --schedulable=true +      {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename | lower }} --schedulable=true      delegate_to: "{{ groups.oo_first_master.0 }}"      when: inventory_hostname in groups.oo_nodes_to_upgrade and was_schedulable | bool      register: node_sched diff --git a/playbooks/common/openshift-etcd/service.yml b/playbooks/common/openshift-etcd/service.yml index fd2bc24ae..f460612ba 100644 --- a/playbooks/common/openshift-etcd/service.yml +++ b/playbooks/common/openshift-etcd/service.yml @@ -10,7 +10,7 @@    - name: Evaluate g_service_etcd      add_host: name={{ item }} groups=g_service_etcd -    with_items: oo_host_group_exp | default([]) +    with_items: "{{ oo_host_group_exp | default([]) }}"  - name: 
Change etcd state on etcd instance(s)    hosts: g_service_etcd diff --git a/playbooks/common/openshift-loadbalancer/service.yml b/playbooks/common/openshift-loadbalancer/service.yml index e06a14c89..efc80edf9 100644 --- a/playbooks/common/openshift-loadbalancer/service.yml +++ b/playbooks/common/openshift-loadbalancer/service.yml @@ -10,7 +10,7 @@    - name: Evaluate g_service_lb      add_host: name={{ item }} groups=g_service_lb -    with_items: oo_host_group_exp | default([]) +    with_items: "{{ oo_host_group_exp | default([]) }}"  - name: Change state on lb instance(s)    hosts: g_service_lb diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml index 57a63cfee..5769ef5cd 100644 --- a/playbooks/common/openshift-master/restart.yml +++ b/playbooks/common/openshift-master/restart.yml @@ -66,63 +66,8 @@        current_host: "{{ exists.stat.exists }}"      when: openshift.common.rolling_restart_mode == 'system' -- name: Determine which masters are currently active -  hosts: oo_masters_to_config -  any_errors_fatal: true -  tasks: -  - name: Check master service status -    command: > -      systemctl is-active {{ openshift.common.service_type }}-master -    register: active_check_output -    when: openshift.master.cluster_method | default(None) == 'pacemaker' -    failed_when: false -    changed_when: false -  - set_fact: -      is_active: "{{ active_check_output.stdout == 'active' }}" -    when: openshift.master.cluster_method | default(None) == 'pacemaker' - -- name: Evaluate master groups -  hosts: localhost -  become: no -  tasks: -  - fail: -      msg: > -        Did not receive active status from any masters. Please verify pacemaker cluster. -    when: "{{ hostvars[groups.oo_first_master.0].openshift.master.cluster_method | default(None) == 'pacemaker' and 'True' not in (hostvars -              | oo_select_keys(groups['oo_masters_to_config']) -              | oo_collect('is_active') -              | list) }}" -  - name: Evaluate oo_active_masters -    add_host: -      name: "{{ item }}" -      groups: oo_active_masters -      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" -      ansible_become: "{{ g_sudo | default(omit) }}" -    with_items: "{{ groups.oo_masters_to_config | default([]) }}" -    when: (hostvars[item]['is_active'] | default(false)) | bool -  - name: Evaluate oo_current_masters -    add_host: -      name: "{{ item }}" -      groups: oo_current_masters -      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" -      ansible_become: "{{ g_sudo | default(omit) }}" -    with_items: "{{ groups.oo_masters_to_config | default([]) }}" -    when: (hostvars[item]['current_host'] | default(false)) | bool - -- name: Validate pacemaker cluster -  hosts: oo_active_masters -  tasks: -  - name: Retrieve pcs status -    command: pcs status -    register: pcs_status_output -    changed_when: false -  - fail: -      msg: > -        Pacemaker cluster validation failed. One or more nodes are not online. 
-    when: not (pcs_status_output.stdout | validate_pcs_cluster(groups.oo_masters_to_config)) | bool -  - name: Restart masters -  hosts: oo_masters_to_config:!oo_active_masters:!oo_current_masters +  hosts: oo_masters_to_config    vars:      openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"    serial: 1 @@ -132,20 +77,3 @@    - include: restart_services.yml      when: openshift.common.rolling_restart_mode == 'services' -- name: Restart active masters -  hosts: oo_active_masters -  serial: 1 -  tasks: -  - include: restart_hosts_pacemaker.yml -    when: openshift.common.rolling_restart_mode == 'system' -  - include: restart_services_pacemaker.yml -    when: openshift.common.rolling_restart_mode == 'services' - -- name: Restart current masters -  hosts: oo_current_masters -  serial: 1 -  tasks: -  - include: restart_hosts.yml -    when: openshift.common.rolling_restart_mode == 'system' -  - include: restart_services.yml -    when: openshift.common.rolling_restart_mode == 'services' diff --git a/playbooks/common/openshift-master/restart_hosts.yml b/playbooks/common/openshift-master/restart_hosts.yml index ff206f5a2..b1c36718c 100644 --- a/playbooks/common/openshift-master/restart_hosts.yml +++ b/playbooks/common/openshift-master/restart_hosts.yml @@ -5,8 +5,8 @@    poll: 0    ignore_errors: true    become: yes -# When cluster_method != pacemaker we can ensure the api_port is -# available. + +# Ensure the api_port is available.  - name: Wait for master API to come back online    become: no    local_action: @@ -15,25 +15,3 @@        state=started        delay=10        port="{{ openshift.master.api_port }}" -  when: openshift.master.cluster_method != 'pacemaker' -- name: Wait for master to start -  become: no -  local_action: -    module: wait_for -      host="{{ inventory_hostname }}" -      state=started -      delay=10 -      port=22 -  when: openshift.master.cluster_method == 'pacemaker' -- name: Wait for master to become available -  command: pcs status -  register: pcs_status_output -  until: pcs_status_output.stdout | validate_pcs_cluster([inventory_hostname]) | bool -  retries: 15 -  delay: 2 -  changed_when: false -  when: openshift.master.cluster_method == 'pacemaker' -- fail: -    msg: > -      Pacemaker cluster validation failed {{ inventory hostname }} is not online. 
-  when: openshift.master.cluster_method == 'pacemaker' and not (pcs_status_output.stdout | validate_pcs_cluster([inventory_hostname])) | bool diff --git a/playbooks/common/openshift-master/restart_hosts_pacemaker.yml b/playbooks/common/openshift-master/restart_hosts_pacemaker.yml deleted file mode 100644 index c9219e8de..000000000 --- a/playbooks/common/openshift-master/restart_hosts_pacemaker.yml +++ /dev/null @@ -1,25 +0,0 @@ -- name: Fail over master resource -  command: > -    pcs resource move master {{ hostvars | oo_select_keys(groups['oo_masters_to_config']) | oo_collect('openshift.common.hostname', {'is_active': 'False'}) | list | first }} -- name: Wait for master API to come back online -  become: no -  local_action: -    module: wait_for -      host="{{ openshift.master.cluster_hostname }}" -      state=started -      delay=10 -      port="{{ openshift.master.api_port }}" -- name: Restart master system -  # https://github.com/ansible/ansible/issues/10616 -  shell: sleep 2 && shutdown -r now "OpenShift Ansible master rolling restart" -  async: 1 -  poll: 0 -  ignore_errors: true -  become: yes -- name: Wait for master to start -  become: no -  local_action: -   module: wait_for -      host="{{ inventory_hostname }}" -      state=started -      delay=10 diff --git a/playbooks/common/openshift-master/restart_services_pacemaker.yml b/playbooks/common/openshift-master/restart_services_pacemaker.yml deleted file mode 100644 index e738f3fb6..000000000 --- a/playbooks/common/openshift-master/restart_services_pacemaker.yml +++ /dev/null @@ -1,10 +0,0 @@ -- name: Restart master services -  command: pcs resource restart master -- name: Wait for master API to come back online -  become: no -  local_action: -    module: wait_for -      host="{{ openshift.master.cluster_hostname }}" -      state=started -      delay=10 -      port="{{ openshift.master.api_port }}" diff --git a/playbooks/common/openshift-master/service.yml b/playbooks/common/openshift-master/service.yml index f60c5a2b5..5e5198335 100644 --- a/playbooks/common/openshift-master/service.yml +++ b/playbooks/common/openshift-master/service.yml @@ -10,7 +10,7 @@    - name: Evaluate g_service_masters      add_host: name={{ item }} groups=g_service_masters -    with_items: oo_host_group_exp | default([]) +    with_items: "{{ oo_host_group_exp | default([]) }}"  - name: Change state on master instance(s)    hosts: g_service_masters diff --git a/playbooks/common/openshift-nfs/service.yml b/playbooks/common/openshift-nfs/service.yml index 20c8ca248..8468014da 100644 --- a/playbooks/common/openshift-nfs/service.yml +++ b/playbooks/common/openshift-nfs/service.yml @@ -8,7 +8,7 @@    - name: Evaluate g_service_nfs      add_host: name={{ item }} groups=g_service_nfs -    with_items: oo_host_group_exp | default([]) +    with_items: "{{ oo_host_group_exp | default([]) }}"  - name: Change state on nfs instance(s)    hosts: g_service_nfs diff --git a/playbooks/common/openshift-node/service.yml b/playbooks/common/openshift-node/service.yml index 0f07add2a..33095c9fb 100644 --- a/playbooks/common/openshift-node/service.yml +++ b/playbooks/common/openshift-node/service.yml @@ -10,7 +10,7 @@    - name: Evaluate g_service_nodes      add_host: name={{ item }} groups=g_service_nodes -    with_items: oo_host_group_exp | default([]) +    with_items: "{{ oo_host_group_exp | default([]) }}"  - name: Change state on node instance(s)    hosts: g_service_nodes diff --git a/playbooks/gce/openshift-cluster/list.yml b/playbooks/gce/openshift-cluster/list.yml 
index c29cac272..34dcd2496 100644 --- a/playbooks/gce/openshift-cluster/list.yml +++ b/playbooks/gce/openshift-cluster/list.yml @@ -16,7 +16,7 @@        groups: oo_list_hosts        ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"        ansible_become: "{{ deployment_vars[deployment_type].become }}" -    with_items: groups[scratch_group] | default([], true) | difference(['localhost']) | difference(groups.status_terminated | default([], true)) +    with_items: "{{ groups[scratch_group] | default([], true) | difference(['localhost']) | difference(groups.status_terminated | default([], true)) }}"  - name: List Hosts    hosts: oo_list_hosts diff --git a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml index 60cf21a5b..7c8189224 100644 --- a/playbooks/gce/openshift-cluster/tasks/launch_instances.yml +++ b/playbooks/gce/openshift-cluster/tasks/launch_instances.yml @@ -49,11 +49,11 @@      gce_public_ip: "{{ item.public_ip }}"      gce_private_ip: "{{ item.private_ip }}"      openshift_node_labels: "{{ node_label }}" -  with_items: gce.instance_data | default([], true) +  with_items: "{{ gce.instance_data | default([], true) }}"  - name: Wait for ssh    wait_for: port=22 host={{ item.public_ip }} -  with_items: gce.instance_data | default([], true) +  with_items: "{{ gce.instance_data | default([], true) }}"  - name: Wait for user setup    command: "ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null {{ hostvars[item.name].ansible_ssh_user }}@{{ item.public_ip }} echo {{ hostvars[item.name].ansible_ssh_user }} user is setup" @@ -61,4 +61,4 @@    until: result.rc == 0    retries: 30    delay: 5 -  with_items: gce.instance_data | default([], true) +  with_items: "{{ gce.instance_data | default([], true) }}" diff --git a/playbooks/gce/openshift-cluster/terminate.yml b/playbooks/gce/openshift-cluster/terminate.yml index 6a0ac088a..68e60f9d4 100644 --- a/playbooks/gce/openshift-cluster/terminate.yml +++ b/playbooks/gce/openshift-cluster/terminate.yml @@ -12,7 +12,7 @@        groups: oo_hosts_to_terminate        ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"        ansible_become: "{{ deployment_vars[deployment_type].become }}" -    with_items: (groups['tag_clusterid-' ~ cluster_id] | default([])) | difference(['localhost']) +    with_items: "{{ (groups['tag_clusterid-' ~ cluster_id] | default([])) | difference(['localhost']) }}"  - name: Unsubscribe VMs    hosts: oo_hosts_to_terminate @@ -43,7 +43,7 @@          pem_file: "{{ lookup('env', 'gce_service_account_pem_file_path') }}"          project_id: "{{ lookup('env', 'gce_project_id') }}"          zone: "{{ lookup('env', 'zone') }}" -      with_items: groups['oo_hosts_to_terminate'] | default([], true) +      with_items: "{{ groups['oo_hosts_to_terminate'] | default([], true) }}"        when: item is defined  #- include: ../openshift-node/terminate.yml diff --git a/playbooks/gce/openshift-cluster/update.yml b/playbooks/gce/openshift-cluster/update.yml index 332f27da7..6d2af3d26 100644 --- a/playbooks/gce/openshift-cluster/update.yml +++ b/playbooks/gce/openshift-cluster/update.yml @@ -7,7 +7,7 @@    - add_host:        name: "{{ item }}"        groups: l_oo_all_hosts -    with_items: g_all_hosts +    with_items: "{{ g_all_hosts }}"  - hosts: l_oo_all_hosts    gather_facts: no @@ -27,7 +27,7 @@        groups: oo_hosts_to_update        ansible_ssh_user: "{{ 
deployment_vars[deployment_type].ssh_user }}"        ansible_become: "{{ deployment_vars[deployment_type].become }}" -    with_items: g_all_hosts | default([]) +    with_items: "{{ g_all_hosts | default([]) }}"  - include: ../../common/openshift-cluster/update_repos_and_packages.yml diff --git a/playbooks/libvirt/openshift-cluster/list.yml b/playbooks/libvirt/openshift-cluster/list.yml index eb64544db..86d5d0aad 100644 --- a/playbooks/libvirt/openshift-cluster/list.yml +++ b/playbooks/libvirt/openshift-cluster/list.yml @@ -16,7 +16,7 @@        groups: oo_list_hosts        ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"        ansible_become: "{{ deployment_vars[deployment_type].become }}" -    with_items: groups[scratch_group] | default([]) | difference(['localhost']) +    with_items: "{{ groups[scratch_group] | default([]) | difference(['localhost']) }}"  - name: List Hosts    hosts: oo_list_hosts diff --git a/playbooks/libvirt/openshift-cluster/terminate.yml b/playbooks/libvirt/openshift-cluster/terminate.yml index df5c52f2d..81e6d8f05 100644 --- a/playbooks/libvirt/openshift-cluster/terminate.yml +++ b/playbooks/libvirt/openshift-cluster/terminate.yml @@ -1,5 +1,5 @@  --- -# TODO: does not handle a non-existant cluster gracefully +# TODO: does not handle a non-existent cluster gracefully  - name: Terminate instance(s)    hosts: localhost diff --git a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml index 755090f94..20ce47c07 100644 --- a/playbooks/openstack/openshift-cluster/files/heat_stack.yaml +++ b/playbooks/openstack/openshift-cluster/files/heat_stack.yaml @@ -45,7 +45,7 @@ parameters:    node_port_incoming:      type: string      label: Source of node port connections -    description: Authorized sources targetting node ports +    description: Authorized sources targeting node ports      default: 0.0.0.0/0    num_etcd: diff --git a/playbooks/openstack/openshift-cluster/update.yml b/playbooks/openstack/openshift-cluster/update.yml index 332f27da7..6d2af3d26 100644 --- a/playbooks/openstack/openshift-cluster/update.yml +++ b/playbooks/openstack/openshift-cluster/update.yml @@ -7,7 +7,7 @@    - add_host:        name: "{{ item }}"        groups: l_oo_all_hosts -    with_items: g_all_hosts +    with_items: "{{ g_all_hosts }}"  - hosts: l_oo_all_hosts    gather_facts: no @@ -27,7 +27,7 @@        groups: oo_hosts_to_update        ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"        ansible_become: "{{ deployment_vars[deployment_type].become }}" -    with_items: g_all_hosts | default([]) +    with_items: "{{ g_all_hosts | default([]) }}"  - include: ../../common/openshift-cluster/update_repos_and_packages.yml | 
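The other recurring change replaces openshift.common.admin_binary with openshift.common.client_binary followed by the adm subcommand, reflecting the consolidation of the standalone oadm tool into oc adm. A before/after sketch of the node-evacuation task, using the facts and groups that appear in the hunks above:

# Before: standalone admin binary (oadm)
- name: Evacuate Node for Kubelet upgrade
  command: >
    {{ openshift.common.admin_binary }} manage-node {{ openshift.node.nodename }} --evacuate --force
  delegate_to: "{{ groups.oo_first_master.0 }}"

# After: client binary with the adm subcommand (oc adm)
- name: Evacuate Node for Kubelet upgrade
  command: >
    {{ openshift.common.client_binary }} adm manage-node {{ openshift.node.nodename }} --evacuate --force
  delegate_to: "{{ groups.oo_first_master.0 }}"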
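The diff also wires a new openshift_hosted_templates role in next to openshift_examples, both in additional_config.yml and in the control-plane upgrade, where existing templates are re-imported with a replace command. A sketch of the roles list as it reads in post_control_plane.yml after these hunks:

  roles:
  # Create templates that do not already exist
  - openshift_examples
  - openshift_hosted_templates
  # Update templates that already exist
  - role: openshift_examples
    registry_url: "{{ openshift.master.registry_url }}"
    openshift_examples_import_command: replace
  - role: openshift_hosted_templates
    registry_url: "{{ openshift.master.registry_url }}"
    openshift_hosted_templates_import_command: replace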
