Diffstat (limited to 'playbooks/common')
46 files changed, 18 insertions, 1096 deletions
diff --git a/playbooks/common/openshift-cluster/cockpit-ui.yml b/playbooks/common/openshift-cluster/cockpit-ui.yml
deleted file mode 100644
index 359132dd0..000000000
--- a/playbooks/common/openshift-cluster/cockpit-ui.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Create Hosted Resources - cockpit-ui
-  hosts: oo_first_master
-  roles:
-  - role: cockpit-ui
-    when:
-    - openshift_hosted_manage_registry | default(true) | bool
-    - not openshift.docker.hosted_registry_insecure | default(false) | bool
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index fce5b652d..c01e17115 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -9,16 +9,16 @@
 - include: ../../openshift-loadbalancer/private/config.yml
   when: groups.oo_lb_to_config | default([]) | count > 0
 
-- include: ../openshift-master/config.yml
+- include: ../../openshift-master/private/config.yml
 
-- include: ../openshift-master/additional_config.yml
+- include: ../../openshift-master/private/additional_config.yml
 
 - include: ../../openshift-node/private/config.yml
 
-- include: ../openshift-glusterfs/config.yml
+- include: ../../openshift-glusterfs/private/config.yml
   when: groups.oo_glusterfs_to_config | default([]) | count > 0
 
-- include: openshift_hosted.yml
+- include: ../../openshift-hosted/private/config.yml
 
 - include: ../../openshift-metrics/private/config.yml
   when: openshift_metrics_install_metrics | default(false) | bool
@@ -26,10 +26,13 @@
 - include: openshift_logging.yml
   when: openshift_logging_install_logging | default(false) | bool
 
+- include: ../../openshift-prometheus/private/config.yml
+  when: openshift_hosted_prometheus_deploy | default(false) | bool
+
 - include: service_catalog.yml
   when: openshift_enable_service_catalog | default(true) | bool
 
-- include: ../openshift-management/config.yml
+- include: ../../openshift-management/private/config.yml
   when: openshift_management_install_management | default(false) | bool
 
 - name: Print deprecated variable warning message if necessary
diff --git a/playbooks/common/openshift-cluster/create_persistent_volumes.yml b/playbooks/common/openshift-cluster/create_persistent_volumes.yml
deleted file mode 100644
index 8a60a30b8..000000000
--- a/playbooks/common/openshift-cluster/create_persistent_volumes.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-- name: Create Hosted Resources - persistent volumes
-  hosts: oo_first_master
-  vars:
-    persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}"
-    persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}"
-  roles:
-  - role: openshift_persistent_volumes
-    when: persistent_volumes | length > 0 or persistent_volume_claims | length > 0
diff --git a/playbooks/common/openshift-cluster/install_docker_gc.yml b/playbooks/common/openshift-cluster/install_docker_gc.yml
deleted file mode 100644
index 1e3dfee07..000000000
--- a/playbooks/common/openshift-cluster/install_docker_gc.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: Install docker gc
-  hosts: oo_first_master
-  gather_facts: false
-  tasks:
-    - include_role:
-        name: openshift_docker_gc
diff --git a/playbooks/common/openshift-cluster/openshift_default_storage_class.yml b/playbooks/common/openshift-cluster/openshift_default_storage_class.yml
deleted file mode 100644
index 62fe0dd60..000000000
--- a/playbooks/common/openshift-cluster/openshift_default_storage_class.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-- name: Create Hosted Resources - openshift_default_storage_class
-  hosts: oo_first_master
-  roles:
-  - role: openshift_default_storage_class
-    when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce' or openshift_cloudprovider_kind == 'openstack')
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
deleted file mode 100644
index 15ee60dc0..000000000
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ /dev/null
@@ -1,44 +0,0 @@
----
-- name: Hosted Install Checkpoint Start
-  hosts: all
-  gather_facts: false
-  tasks:
-  - name: Set Hosted install 'In Progress'
-    run_once: true
-    set_stats:
-      data:
-        installer_phase_hosted:
-          status: "In Progress"
-          start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
-
-- include: create_persistent_volumes.yml
-
-- include: openshift_default_storage_class.yml
-
-- include: openshift_hosted_create_projects.yml
-
-- include: openshift_hosted_router.yml
-
-- include: openshift_hosted_registry.yml
-
-- include: cockpit-ui.yml
-
-- include: openshift_prometheus.yml
-  when: openshift_hosted_prometheus_deploy | default(False) | bool
-
-- include: install_docker_gc.yml
-  when:
-  - openshift_use_crio | default(False) | bool
-  - openshift_crio_enable_docker_gc | default(False) | bool
-
-- name: Hosted Install Checkpoint End
-  hosts: all
-  gather_facts: false
-  tasks:
-  - name: Set Hosted install 'Complete'
-    run_once: true
-    set_stats:
-      data:
-        installer_phase_hosted:
-          status: "Complete"
-          end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-cluster/openshift_hosted_create_projects.yml b/playbooks/common/openshift-cluster/openshift_hosted_create_projects.yml
deleted file mode 100644
index d5ca5185c..000000000
--- a/playbooks/common/openshift-cluster/openshift_hosted_create_projects.yml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- name: Create Hosted Resources - openshift projects
-  hosts: oo_first_master
-  tasks:
-  - include_role:
-      name: openshift_hosted
-      tasks_from: create_projects.yml
diff --git a/playbooks/common/openshift-cluster/openshift_hosted_registry.yml b/playbooks/common/openshift-cluster/openshift_hosted_registry.yml
deleted file mode 100644
index 2a91a827c..000000000
--- a/playbooks/common/openshift-cluster/openshift_hosted_registry.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- name: Create Hosted Resources - registry
-  hosts: oo_first_master
-  tasks:
-  - set_fact:
-      openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
-    when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
-  - include_role:
-      name: openshift_hosted
-      tasks_from: registry.yml
-    when:
-    - openshift_hosted_manage_registry | default(True) | bool
-    - openshift_hosted_registry_registryurl is defined
diff --git a/playbooks/common/openshift-cluster/openshift_hosted_router.yml b/playbooks/common/openshift-cluster/openshift_hosted_router.yml
deleted file mode 100644
index bcb5a34a4..000000000
--- a/playbooks/common/openshift-cluster/openshift_hosted_router.yml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- name: Create Hosted Resources - router
-  hosts: oo_first_master
-  tasks:
-  - set_fact:
-      openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
-    when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
-  - include_role:
-      name: openshift_hosted
-      tasks_from: router.yml
-    when:
-    - openshift_hosted_manage_router | default(True) | bool
-    - openshift_hosted_router_registryurl is defined
diff --git a/playbooks/common/openshift-cluster/openshift_prometheus.yml b/playbooks/common/openshift-cluster/openshift_prometheus.yml
deleted file mode 100644
index 7aa9a16e6..000000000
--- a/playbooks/common/openshift-cluster/openshift_prometheus.yml
+++ /dev/null
@@ -1,29 +0,0 @@
----
-- name: Prometheus Install Checkpoint Start
-  hosts: all
-  gather_facts: false
-  tasks:
-  - name: Set Prometheus install 'In Progress'
-    run_once: true
-    set_stats:
-      data:
-        installer_phase_prometheus:
-          status: "In Progress"
-          start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
-
-- name: Create Hosted Resources - openshift_prometheus
-  hosts: oo_first_master
-  roles:
-  - role: openshift_prometheus
-
-- name: Prometheus Install Checkpoint End
-  hosts: all
-  gather_facts: false
-  tasks:
-  - name: Set Prometheus install 'Complete'
-    run_once: true
-    set_stats:
-      data:
-        installer_phase_prometheus:
-          status: "Complete"
-          end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml
index 8dc8c4afb..438f704bc 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml
@@ -82,7 +82,7 @@
       state: absent
     changed_when: false
 
-- include: ../../openshift-master/restart.yml
+- include: ../../../openshift-master/private/restart.yml
   # Do not restart masters when master or etcd certificates were previously expired.
   when:
   # masters
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
index b58bf3c91..5a837d80d 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
@@ -207,7 +207,7 @@
       group: "{{ 'root' if item == 'root' else _ansible_ssh_user_gid.stdout  }}"
     with_items: "{{ client_users }}"
 
-- include: ../../openshift-master/restart.yml
+- include: ../../../openshift-master/private/restart.yml
   # Do not restart masters when master or etcd certificates were previously expired.
   when:
   # masters
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index fa65567c2..52438bdc4 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -85,10 +85,10 @@
   - include: "{{ openshift_master_upgrade_hook }}"
     when: openshift_master_upgrade_hook is defined
 
-  - include: ../../openshift-master/restart_hosts.yml
+  - include: ../../../openshift-master/private/restart_hosts.yml
     when: openshift.common.rolling_restart_mode == 'system'
 
-  - include: ../../openshift-master/restart_services.yml
+  - include: ../../../openshift-master/private/restart_services.yml
     when: openshift.common.rolling_restart_mode == 'services'
 
   # Run the post-upgrade hook if defined:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
index ef52f214b..6cb6a665f 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -79,7 +79,7 @@
     # docker is configured and running.
     skip_docker_role: True
 
-- include: ../../../openshift-master/validate_restart.yml
+- include: ../../../../openshift-master/private/validate_restart.yml
   tags:
   - pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index 4c6646a38..8f48bedcc 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -83,7 +83,7 @@
     # docker is configured and running.
     skip_docker_role: True
 
-- include: ../../../openshift-master/validate_restart.yml
+- include: ../../../../openshift-master/private/validate_restart.yml
   tags:
   - pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
index e3c012380..2b99568c7 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -83,7 +83,7 @@
     # docker is configured and running.
     skip_docker_role: True
 
-- include: ../../../openshift-master/validate_restart.yml
+- include: ../../../../openshift-master/private/validate_restart.yml
   tags:
   - pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index a88fa7b2e..d3d2046e6 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -87,7 +87,7 @@
     # docker is configured and running.
     skip_docker_role: True
 
-- include: ../../../openshift-master/validate_restart.yml
+- include: ../../../../openshift-master/private/validate_restart.yml
   tags:
   - pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
index 73df15d53..b602cdd0e 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade.yml
@@ -83,7 +83,7 @@
     # docker is configured and running.
     skip_docker_role: True
 
-- include: ../../../openshift-master/validate_restart.yml
+- include: ../../../../openshift-master/private/validate_restart.yml
   tags:
   - pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
index 48d55c16f..da81e6dea 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_8/upgrade_control_plane.yml
@@ -87,7 +87,7 @@
     # docker is configured and running.
     skip_docker_role: True
 
-- include: ../../../openshift-master/validate_restart.yml
+- include: ../../../../openshift-master/private/validate_restart.yml
   tags:
   - pre_upgrade
diff --git a/playbooks/common/openshift-glusterfs/config.yml b/playbooks/common/openshift-glusterfs/config.yml
deleted file mode 100644
index 19e14ab3e..000000000
--- a/playbooks/common/openshift-glusterfs/config.yml
+++ /dev/null
@@ -1,60 +0,0 @@
----
-- name: GlusterFS Install Checkpoint Start
-  hosts: all
-  gather_facts: false
-  tasks:
-  - name: Set GlusterFS install 'In Progress'
-    run_once: true
-    set_stats:
-      data:
-        installer_phase_glusterfs:
-          status: "In Progress"
-          start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
-
-- name: Open firewall ports for GlusterFS nodes
-  hosts: glusterfs
-  tasks:
-  - include_role:
-      name: openshift_storage_glusterfs
-      tasks_from: firewall.yml
-    when:
-    - openshift_storage_glusterfs_is_native | default(True) | bool
-  - include_role:
-      name: openshift_storage_glusterfs
-      tasks_from: kernel_modules.yml
-    when:
-    - openshift_storage_glusterfs_is_native | default(True) | bool
-
-- name: Open firewall ports for GlusterFS registry nodes
-  hosts: glusterfs_registry
-  tasks:
-  - include_role:
-      name: openshift_storage_glusterfs
-      tasks_from: firewall.yml
-    when:
-    - openshift_storage_glusterfs_registry_is_native | default(True) | bool
-  - include_role:
-      name: openshift_storage_glusterfs
-      tasks_from: kernel_modules.yml
-    when:
-    - openshift_storage_glusterfs_registry_is_native | default(True) | bool
-
-- name: Configure GlusterFS
-  hosts: oo_first_master
-  tasks:
-  - name: setup glusterfs
-    include_role:
-      name: openshift_storage_glusterfs
-    when: groups.oo_glusterfs_to_config | default([]) | count > 0
-
-- name: GlusterFS Install Checkpoint End
-  hosts: all
-  gather_facts: false
-  tasks:
-  - name: Set GlusterFS install 'Complete'
-    run_once: true
-    set_stats:
-      data:
-        installer_phase_glusterfs:
-          status: "Complete"
-          end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-glusterfs/filter_plugins b/playbooks/common/openshift-glusterfs/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/common/openshift-glusterfs/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-glusterfs/lookup_plugins b/playbooks/common/openshift-glusterfs/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/common/openshift-glusterfs/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-glusterfs/registry.yml b/playbooks/common/openshift-glusterfs/registry.yml
deleted file mode 100644
index 80cf7529e..000000000
--- a/playbooks/common/openshift-glusterfs/registry.yml
+++ /dev/null
@@ -1,49 +0,0 @@
----
-- include: config.yml
-
-- name: Initialize GlusterFS registry PV and PVC vars
-  hosts: oo_first_master
-  tags: hosted
-  tasks:
-  - set_fact:
-      glusterfs_pv: []
-      glusterfs_pvc: []
-
-  - set_fact:
-      glusterfs_pv:
-      - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-volume"
-        capacity: "{{ openshift.hosted.registry.storage.volume.size }}"
-        access_modes: "{{ openshift.hosted.registry.storage.access.modes }}"
-        storage:
-          glusterfs:
-            endpoints: "{{ openshift.hosted.registry.storage.glusterfs.endpoints }}"
-            path: "{{ openshift.hosted.registry.storage.glusterfs.path }}"
-            readOnly: "{{ openshift.hosted.registry.storage.glusterfs.readOnly }}"
-      glusterfs_pvc:
-      - name: "{{ openshift.hosted.registry.storage.volume.name }}-glusterfs-claim"
-        capacity: "{{ openshift.hosted.registry.storage.volume.size }}"
-        access_modes: "{{ openshift.hosted.registry.storage.access.modes }}"
-    when: openshift.hosted.registry.storage.glusterfs.swap
-
-- name: Create persistent volumes
-  hosts: oo_first_master
-  tags:
-  - hosted
-  vars:
-    persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups, glusterfs_pv) }}"
-    persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims(glusterfs_pvc) }}"
-  roles:
-  - role: openshift_persistent_volumes
-    when: persistent_volumes | union(glusterfs_pv) | length > 0 or persistent_volume_claims | union(glusterfs_pvc) | length > 0
-
-- name: Create Hosted Resources
-  hosts: oo_first_master
-  tags:
-  - hosted
-  pre_tasks:
-  - set_fact:
-      openshift_hosted_router_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
-      openshift_hosted_registry_registryurl: "{{ hostvars[groups.oo_first_master.0].openshift.master.registry_url }}"
-    when: "'master' in hostvars[groups.oo_first_master.0].openshift and 'registry_url' in hostvars[groups.oo_first_master.0].openshift.master"
-  roles:
-  - role: openshift_hosted
diff --git a/playbooks/common/openshift-glusterfs/roles b/playbooks/common/openshift-glusterfs/roles
deleted file mode 120000
index e2b799b9d..000000000
--- a/playbooks/common/openshift-glusterfs/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../roles/
\ No newline at end of file
diff --git a/playbooks/common/openshift-management/add_container_provider.yml b/playbooks/common/openshift-management/add_container_provider.yml
deleted file mode 100644
index facb3a5b9..000000000
--- a/playbooks/common/openshift-management/add_container_provider.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Add Container Provider to Management
-  hosts: oo_first_master
-  tasks:
-  - name: Run the Management Integration Tasks
-    include_role:
-      name: openshift_management
-      tasks_from: add_container_provider
diff --git a/playbooks/common/openshift-management/config.yml b/playbooks/common/openshift-management/config.yml
deleted file mode 100644
index 3f1cdf713..000000000
--- a/playbooks/common/openshift-management/config.yml
+++ /dev/null
@@ -1,39 +0,0 @@
----
-- name: Management Install Checkpoint Start
-  hosts: all
-  gather_facts: false
-  tasks:
-  - name: Set Management install 'In Progress'
-    run_once: true
-    set_stats:
-      data:
-        installer_phase_management:
-          status: "In Progress"
-          start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
-
-- name: Setup CFME
-  hosts: oo_first_master
-  pre_tasks:
-  - name: Create a temporary place to evaluate the PV templates
-    command: mktemp -d /tmp/openshift-ansible-XXXXXXX
-    register: r_openshift_management_mktemp
-    changed_when: false
-
-  tasks:
-  - name: Run the CFME Setup Role
-    include_role:
-      name: openshift_management
-    vars:
-      template_dir: "{{ hostvars[groups.masters.0].r_openshift_management_mktemp.stdout }}"
-
-- name: Management Install Checkpoint End
-  hosts: all
-  gather_facts: false
-  tasks:
-  - name: Set Management install 'Complete'
-    run_once: true
-    set_stats:
-      data:
-        installer_phase_management:
-          status: "Complete"
-          end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-management/filter_plugins b/playbooks/common/openshift-management/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/common/openshift-management/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-management/library b/playbooks/common/openshift-management/library
deleted file mode 120000
index ba40d2f56..000000000
--- a/playbooks/common/openshift-management/library
+++ /dev/null
@@ -1 +0,0 @@
-../../../library
\ No newline at end of file
diff --git a/playbooks/common/openshift-management/roles b/playbooks/common/openshift-management/roles
deleted file mode 120000
index 20c4c58cf..000000000
--- a/playbooks/common/openshift-management/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../roles
\ No newline at end of file
diff --git a/playbooks/common/openshift-management/uninstall.yml b/playbooks/common/openshift-management/uninstall.yml
deleted file mode 100644
index 9f35cc276..000000000
--- a/playbooks/common/openshift-management/uninstall.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Uninstall CFME
-  hosts: masters[0]
-  tasks:
-  - name: Run the CFME Uninstall Role Tasks
-    include_role:
-      name: openshift_management
-      tasks_from: uninstall
diff --git a/playbooks/common/openshift-master/additional_config.yml b/playbooks/common/openshift-master/additional_config.yml
deleted file mode 100644
index 32f638d42..000000000
--- a/playbooks/common/openshift-master/additional_config.yml
+++ /dev/null
@@ -1,52 +0,0 @@
----
-- name: Master Additional Install Checkpoint Start
-  hosts: all
-  gather_facts: false
-  tasks:
-  - name: Set Master Additional install 'In Progress'
-    run_once: true
-    set_stats:
-      data:
-        installer_phase_master_additional:
-          status: "In Progress"
-          start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
-
-- name: Additional master configuration
-  hosts: oo_first_master
-  vars:
-    cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}"
-    etcd_urls: "{{ openshift.master.etcd_urls }}"
-    openshift_master_ha: "{{ groups.oo_masters | length > 1 }}"
-    omc_cluster_hosts: "{{ groups.oo_masters | join(' ')}}"
-  roles:
-  - role: openshift_master_cluster
-    when: openshift_master_ha | bool and openshift.master.cluster_method == "pacemaker"
-  - role: openshift_project_request_template
-    when: openshift_project_request_template_manage
-  - role: openshift_examples
-    when: openshift_install_examples | default(true, true) | bool
-    registry_url: "{{ openshift.master.registry_url }}"
-  - role: openshift_hosted_templates
-    registry_url: "{{ openshift.master.registry_url }}"
-  - role: openshift_manageiq
-    when: openshift_use_manageiq | default(true) | bool
-  - role: cockpit
-    when:
-    - not openshift.common.is_atomic | bool
-    - deployment_type == 'openshift-enterprise'
-    - osm_use_cockpit is undefined or osm_use_cockpit | bool
-    - openshift.common.deployment_subtype != 'registry'
-  - role: flannel_register
-    when: openshift_use_flannel | default(false) | bool
-
-- name: Master Additional Install Checkpoint End
-  hosts: all
-  gather_facts: false
-  tasks:
-  - name: Set Master Additional install 'Complete'
-    run_once: true
-    set_stats:
-      data:
-        installer_phase_master_additional:
-          status: "Complete"
-          end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-master/certificates.yml b/playbooks/common/openshift-master/certificates.yml
deleted file mode 100644
index f6afbc36f..000000000
--- a/playbooks/common/openshift-master/certificates.yml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- name: Create OpenShift certificates for master hosts
-  hosts: oo_masters_to_config
-  vars:
-    openshift_ca_host: "{{ groups.oo_first_master.0 }}"
-  roles:
-  - role: openshift_master_facts
-  - role: openshift_named_certificates
-  - role: openshift_ca
-  - role: openshift_master_certificates
-    openshift_master_etcd_hosts: "{{ hostvars
-                                     | oo_select_keys(groups['oo_etcd_to_config'] | default([]))
-                                     | oo_collect('openshift.common.hostname')
-                                     | default(none, true) }}"
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
deleted file mode 100644
index 6b0fd6b7c..000000000
--- a/playbooks/common/openshift-master/config.yml
+++ /dev/null
@@ -1,252 +0,0 @@
----
-- name: Master Install Checkpoint Start
-  hosts: all
-  gather_facts: false
-  tasks:
-  - name: Set Master install 'In Progress'
-    run_once: true
-    set_stats:
-      data:
-        installer_phase_master:
-          status: "In Progress"
-          start: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
-
-- include: certificates.yml
-
-- name: Disable excluders
-  hosts: oo_masters_to_config
-  gather_facts: no
-  roles:
-  - role: openshift_excluder
-    r_openshift_excluder_action: disable
-    r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
-
-- name: Gather and set facts for master hosts
-  hosts: oo_masters_to_config
-  pre_tasks:
-  # Per https://bugzilla.redhat.com/show_bug.cgi?id=1469336
-  #
-  # When scaling up a cluster upgraded from OCP <= 3.5, ensure that
-  # OPENSHIFT_DEFAULT_REGISTRY is present as defined on the existing
-  # masters, or absent if such is the case.
-  - name: Detect if this host is a new master in a scale up
-    set_fact:
-      g_openshift_master_is_scaleup: "{{ openshift.common.hostname in ( groups['new_masters'] | default([]) ) }}"
-
-  - name: Scaleup Detection
-    debug:
-      var: g_openshift_master_is_scaleup
-
-  - name: Check for RPM generated config marker file .config_managed
-    stat:
-      path: /etc/origin/.config_managed
-    register: rpmgenerated_config
-
-  - name: Remove RPM generated config files if present
-    file:
-      path: "/etc/origin/{{ item }}"
-      state: absent
-    when:
-    - rpmgenerated_config.stat.exists == true
-    - deployment_type == 'openshift-enterprise'
-    with_items:
-    - master
-    - node
-    - .config_managed
-
-  - set_fact:
-      openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}"
-      openshift_master_etcd_hosts: "{{ hostvars
-                                       | oo_select_keys(groups['oo_etcd_to_config']
-                                                        | default([]))
-                                       | oo_collect('openshift.common.hostname')
-                                       | default(none, true) }}"
-  roles:
-  - openshift_facts
-  post_tasks:
-  - openshift_facts:
-      role: master
-      local_facts:
-        api_port: "{{ openshift_master_api_port | default(None) }}"
-        api_url: "{{ openshift_master_api_url | default(None) }}"
-        api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}"
-        controllers_port: "{{ openshift_master_controllers_port | default(None) }}"
-        public_api_url: "{{ openshift_master_public_api_url | default(None) }}"
-        cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}"
-        cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}"
-        console_path: "{{ openshift_master_console_path | default(None) }}"
-        console_port: "{{ openshift_master_console_port | default(None) }}"
-        console_url: "{{ openshift_master_console_url | default(None) }}"
-        console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
-        public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
-        ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
-        master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
-
-- name: Inspect state of first master config settings
-  hosts: oo_first_master
-  roles:
-  - role: openshift_facts
-  post_tasks:
-  - openshift_facts:
-      role: master
-      local_facts:
-        session_auth_secrets: "{{ openshift_master_session_auth_secrets | default(openshift.master.session_auth_secrets | default(None)) }}"
-        session_encryption_secrets: "{{ openshift_master_session_encryption_secrets | default(openshift.master.session_encryption_secrets | default(None)) }}"
-  - name: Check for existing configuration
-    stat:
-      path: /etc/origin/master/master-config.yaml
-    register: master_config_stat
-
-  - name: Set clean install fact
-    set_fact:
-      l_clean_install: "{{ not master_config_stat.stat.exists | bool }}"
-
-  - name: Determine if etcd3 storage is in use
-    command: grep  -Pzo  "storage-backend:\n.*etcd3" /etc/origin/master/master-config.yaml -q
-    register: etcd3_grep
-    failed_when: false
-    changed_when: false
-
-  - name: Set etcd3 fact
-    set_fact:
-      l_etcd3_enabled: "{{ etcd3_grep.rc == 0 | bool }}"
-
-  - name: Check if atomic-openshift-master sysconfig exists yet
-    stat:
-      path: /etc/sysconfig/atomic-openshift-master
-    register: l_aom_exists
-
-  - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master parameter if present
-    command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master
-    register: l_default_registry_defined
-    when: l_aom_exists.stat.exists | bool
-
-  - name: Check if atomic-openshift-master-api sysconfig exists yet
-    stat:
-      path: /etc/sysconfig/atomic-openshift-master-api
-    register: l_aom_api_exists
-
-  - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master-api parameter if present
-    command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master-api
-    register: l_default_registry_defined_api
-    when: l_aom_api_exists.stat.exists | bool
-
-  - name: Check if atomic-openshift-master-controllers sysconfig exists yet
-    stat:
-      path: /etc/sysconfig/atomic-openshift-master-controllers
-    register: l_aom_controllers_exists
-
-  - name: Preserve OPENSHIFT_DEFAULT_REGISTRY master-controllers parameter if present
-    command: awk '/^OPENSHIFT_DEFAULT_REGISTRY/' /etc/sysconfig/atomic-openshift-master-controllers
-    register: l_default_registry_defined_controllers
-    when: l_aom_controllers_exists.stat.exists | bool
-
-  - name: Update facts with OPENSHIFT_DEFAULT_REGISTRY value
-    set_fact:
-      l_default_registry_value: "{{ l_default_registry_defined.stdout | default('') }}"
-      l_default_registry_value_api: "{{ l_default_registry_defined_api.stdout | default('') }}"
-      l_default_registry_value_controllers: "{{ l_default_registry_defined_controllers.stdout | default('') }}"
-
-- name: Generate master session secrets
-  hosts: oo_first_master
-  vars:
-    g_session_secrets_present: "{{ (openshift.master.session_auth_secrets | default([])) | length > 0 and (openshift.master.session_encryption_secrets | default([])) | length > 0 }}"
-    g_session_auth_secrets: "{{ [ 24 | oo_generate_secret ] }}"
-    g_session_encryption_secrets: "{{ [ 24 | oo_generate_secret ] }}"
-  roles:
-  - role: openshift_facts
-  tasks:
-  - openshift_facts:
-      role: master
-      local_facts:
-        session_auth_secrets: "{{ g_session_auth_secrets }}"
-        session_encryption_secrets: "{{ g_session_encryption_secrets }}"
-    when: not g_session_secrets_present | bool
-
-- name: Configure masters
-  hosts: oo_masters_to_config
-  any_errors_fatal: true
-  vars:
-    openshift_master_ha: "{{ openshift.master.ha }}"
-    openshift_master_count: "{{ openshift.master.master_count }}"
-    openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}"
-    openshift_master_session_encryption_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_encryption_secrets }}"
-    openshift_ca_host: "{{ groups.oo_first_master.0 }}"
-    openshift_master_etcd_hosts: "{{ hostvars
-                                     | oo_select_keys(groups['oo_etcd_to_config'] | default([]))
-                                     | oo_collect('openshift.common.hostname')
-                                     | default(none, true) }}"
-    openshift_no_proxy_etcd_host_ips: "{{ hostvars | oo_select_keys(groups['oo_etcd_to_config'] | default([]))
-                                                | oo_collect('openshift.common.ip') | default([]) | join(',')
-                                                }}"
-  roles:
-  - role: os_firewall
-  - role: openshift_master_facts
-  - role: openshift_hosted_facts
-  - role: openshift_clock
-  - role: openshift_cloud_provider
-  - role: openshift_builddefaults
-  - role: openshift_buildoverrides
-  - role: nickhammond.logrotate
-  - role: contiv
-    contiv_role: netmaster
-    when: openshift_use_contiv | default(False) | bool
-  - role: openshift_master
-    openshift_master_hosts: "{{ groups.oo_masters_to_config }}"
-    r_openshift_master_clean_install: "{{ hostvars[groups.oo_first_master.0].l_clean_install }}"
-    r_openshift_master_etcd3_storage: "{{ hostvars[groups.oo_first_master.0].l_etcd3_enabled }}"
-    openshift_master_is_scaleup_host: "{{ g_openshift_master_is_scaleup | default(false) }}"
-    openshift_master_default_registry_value: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value }}"
-    openshift_master_default_registry_value_api: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_api }}"
-    openshift_master_default_registry_value_controllers: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_controllers }}"
-  - role: tuned
-  - role: nuage_ca
-    when: openshift_use_nuage | default(false) | bool
-  - role: nuage_common
-    when: openshift_use_nuage | default(false) | bool
-  - role: nuage_master
-    when: openshift_use_nuage | default(false) | bool
-  - role: calico_master
-    when: openshift_use_calico | default(false) | bool
-  tasks:
-  - include_role:
-      name: kuryr
-      tasks_from: master
-    when: openshift_use_kuryr | default(false) | bool
-
-  - name: Setup the node group config maps
-    include_role:
-      name: openshift_node_group
-    when: openshift_master_bootstrap_enabled | default(false) | bool
-    run_once: True
-
-  post_tasks:
-  - name: Create group for deployment type
-    group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
-    changed_when: False
-
-- name: Configure API Aggregation on masters
-  hosts: oo_masters
-  serial: 1
-  tasks:
-  - include: tasks/wire_aggregator.yml
-
-- name: Re-enable excluder if it was previously enabled
-  hosts: oo_masters_to_config
-  gather_facts: no
-  roles:
-  - role: openshift_excluder
-    r_openshift_excluder_action: enable
-    r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
-
-- name: Master Install Checkpoint End
-  hosts: all
-  gather_facts: false
-  tasks:
-  - name: Set Master install 'Complete'
-    run_once: true
-    set_stats:
-      data:
-        installer_phase_master:
-          status: "Complete"
-          end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
diff --git a/playbooks/common/openshift-master/filter_plugins b/playbooks/common/openshift-master/filter_plugins
deleted file mode 120000
index 99a95e4ca..000000000
--- a/playbooks/common/openshift-master/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-master/library b/playbooks/common/openshift-master/library
deleted file mode 120000
index d0b7393d3..000000000
--- a/playbooks/common/openshift-master/library
+++ /dev/null
@@ -1 +0,0 @@
-../../../library/
\ No newline at end of file
diff --git a/playbooks/common/openshift-master/lookup_plugins b/playbooks/common/openshift-master/lookup_plugins
deleted file mode 120000
index ac79701db..000000000
--- a/playbooks/common/openshift-master/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins
\ No newline at end of file
diff --git a/playbooks/common/openshift-master/restart.yml b/playbooks/common/openshift-master/restart.yml
deleted file mode 100644
index 4d73b8124..000000000
--- a/playbooks/common/openshift-master/restart.yml
+++ /dev/null
@@ -1,19 +0,0 @@
----
-- include: validate_restart.yml
-
-- name: Restart masters
-  hosts: oo_masters_to_config
-  vars:
-    openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
-  serial: 1
-  handlers:
-  - include: ../../../roles/openshift_master/handlers/main.yml
-    static: yes
-  roles:
-  - openshift_facts
-  post_tasks:
-  - include: restart_hosts.yml
-    when: openshift_rolling_restart_mode | default('services') == 'system'
-
-  - include: restart_services.yml
-    when: openshift_rolling_restart_mode | default('services') == 'services'
diff --git a/playbooks/common/openshift-master/restart_hosts.yml b/playbooks/common/openshift-master/restart_hosts.yml
deleted file mode 100644
index a5dbe0590..000000000
--- a/playbooks/common/openshift-master/restart_hosts.yml
+++ /dev/null
@@ -1,40 +0,0 @@
----
-- name: Restart master system
-  # https://github.com/ansible/ansible/issues/10616
-  shell: sleep 2 && shutdown -r now "OpenShift Ansible master rolling restart"
-  async: 1
-  poll: 0
-  ignore_errors: true
-  become: yes
-
-# WARNING: This process is riddled with weird behavior.
-
-# Workaround for https://github.com/ansible/ansible/issues/21269
-- set_fact:
-    wait_for_host: "{{ ansible_host }}"
-
-# Ansible's blog documents this *without* the port, which appears to now
-# just wait until the timeout value and then proceed without checking anything.
-# port is now required.
-#
-# However neither ansible_ssh_port or ansible_port are reliably defined, likely
-# only if overridden. Assume a default of 22.
-- name: Wait for master to restart
-  local_action:
-    module: wait_for
-      host="{{ wait_for_host }}"
-      state=started
-      delay=10
-      timeout=600
-      port="{{ ansible_port | default(ansible_ssh_port | default(22,boolean=True),boolean=True) }}"
-  become: no
-
-# Now that ssh is back up we can wait for API on the remote system,
-# avoiding some potential connection issues from local system:
-- name: Wait for master API to come back online
-  wait_for:
-    host: "{{ openshift.common.hostname }}"
-    state: started
-    delay: 10
-    port: "{{ openshift.master.api_port }}"
-    timeout: 600
diff --git a/playbooks/common/openshift-master/restart_services.yml b/playbooks/common/openshift-master/restart_services.yml
deleted file mode 100644
index 4e1b3a3be..000000000
--- a/playbooks/common/openshift-master/restart_services.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include_role:
-    name: openshift_master
-    tasks_from: restart.yml
diff --git a/playbooks/common/openshift-master/revert-client-ca.yml b/playbooks/common/openshift-master/revert-client-ca.yml
deleted file mode 100644
index 9ae23bf5b..000000000
--- a/playbooks/common/openshift-master/revert-client-ca.yml
+++ /dev/null
@@ -1,17 +0,0 @@
----
-- name: Set servingInfo.clientCA = ca.crt in master config
-  hosts: oo_masters_to_config
-  tasks:
-  - name: Read master config
-    slurp:
-      src: "{{ openshift.common.config_base }}/master/master-config.yaml"
-    register: g_master_config_output
-
-  # servingInfo.clientCA may be set as the client-ca-bundle.crt from
-  # CA redeployment and this task reverts that change.
-  - name: Set servingInfo.clientCA = ca.crt in master config
-    modify_yaml:
-      dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
-      yaml_key: servingInfo.clientCA
-      yaml_value: ca.crt
-    when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'ca.crt'
diff --git a/playbooks/common/openshift-master/roles b/playbooks/common/openshift-master/roles
deleted file mode 120000
index e2b799b9d..000000000
--- a/playbooks/common/openshift-master/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../roles/
\ No newline at end of file
diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml
deleted file mode 100644
index ed54e6ca4..000000000
--- a/playbooks/common/openshift-master/scaleup.yml
+++ /dev/null
@@ -1,57 +0,0 @@
----
-- name: Update master count
-  hosts: oo_masters:!oo_masters_to_config
-  serial: 1
-  roles:
-  - openshift_facts
-  post_tasks:
-  - openshift_facts:
-      role: master
-      local_facts:
-        ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
-        master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
-  - name: Update master count
-    modify_yaml:
-      dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
-      yaml_key: 'kubernetesMasterConfig.masterCount'
-      yaml_value: "{{ openshift.master.master_count }}"
-    notify:
-    - restart master api
-    - restart master controllers
-  handlers:
-  - name: restart master api
-    service: name={{ openshift.common.service_type }}-master-controllers state=restarted
-    notify: verify api server
-  # We retry the controllers because the API may not be 100% initialized yet.
-  - name: restart master controllers
-    command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
-    retries: 3
-    delay: 5
-    register: result
-    until: result.rc == 0
-  - name: verify api server
-    command: >
-      curl --silent --tlsv1.2
-      --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
-      {{ openshift.master.api_url }}/healthz/ready
-    args:
-      # Disables the following warning:
-      # Consider using get_url or uri module rather than running curl
-      warn: no
-    register: api_available_output
-    until: api_available_output.stdout == 'ok'
-    retries: 120
-    delay: 1
-    changed_when: false
-
-- include: ../openshift-master/set_network_facts.yml
-
-- include: ../../openshift-etcd/private/certificates.yml
-
-- include: ../openshift-master/config.yml
-
-- include: ../../openshift-loadbalancer/private/config.yml
-
-- include: ../../openshift-node/private/certificates.yml
-
-- include: ../../openshift-node/private/config.yml
diff --git a/playbooks/common/openshift-master/set_network_facts.yml b/playbooks/common/openshift-master/set_network_facts.yml
deleted file mode 100644
index 9a6cf26fc..000000000
--- a/playbooks/common/openshift-master/set_network_facts.yml
+++ /dev/null
@@ -1,34 +0,0 @@
----
-- name: Read first master\'s config
-  hosts: oo_first_master
-  gather_facts: no
-  tasks:
-  - stat:
-      path: "{{ openshift.common.config_base }}/master/master-config.yaml"
-    register: g_master_config_stat
-  - slurp:
-      src: "{{ openshift.common.config_base }}/master/master-config.yaml"
-    register: g_master_config_slurp
-
-- name: Set network facts for masters
-  hosts: oo_masters_to_config
-  gather_facts: no
-  roles:
-  - role: openshift_facts
-  post_tasks:
-  - block:
-    - set_fact:
-        osm_cluster_network_cidr: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.clusterNetworkCIDR }}"
-      when: osm_cluster_network_cidr is not defined
-    - set_fact:
-        osm_host_subnet_length: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.hostSubnetLength }}"
-      when: osm_host_subnet_length is not defined
-    - set_fact:
-        openshift_portal_net: "{{ (hostvars[groups.oo_first_master.0].g_master_config_slurp.content|b64decode|from_yaml).networkConfig.serviceNetworkCIDR }}"
-      when: openshift_portal_net is not defined
-    - openshift_facts:
-        role: common
-        local_facts:
-          portal_net: "{{ openshift_portal_net | default(openshift_master_portal_net) | default(None) }}"
-    when:
-    - hostvars[groups.oo_first_master.0].g_master_config_stat.stat.exists | bool
diff --git a/playbooks/common/openshift-master/tasks/wire_aggregator.yml b/playbooks/common/openshift-master/tasks/wire_aggregator.yml
deleted file mode 100644
index 97acc5d5d..000000000
--- a/playbooks/common/openshift-master/tasks/wire_aggregator.yml
+++ /dev/null
@@ -1,216 +0,0 @@
----
-- name: Make temp cert dir
-  command: mktemp -d /tmp/openshift-service-catalog-ansible-XXXXXX
-  register: certtemp
-  changed_when: False
-
-- name: Check for First Master Aggregator Signer cert
-  stat:
-    path: /etc/origin/master/front-proxy-ca.crt
-  register: first_proxy_ca_crt
-  changed_when: false
-  delegate_to: "{{ groups.oo_first_master.0 }}"
-
-- name: Check for First Master Aggregator Signer key
-  stat:
-    path: /etc/origin/master/front-proxy-ca.crt
-  register: first_proxy_ca_key
-  changed_when: false
-  delegate_to: "{{ groups.oo_first_master.0 }}"
-
-# TODO: this currently has a bug where hostnames are required
-- name: Creating First Master Aggregator signer certs
-  command: >
-    {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm ca create-signer-cert
-    --cert=/etc/origin/master/front-proxy-ca.crt
-    --key=/etc/origin/master/front-proxy-ca.key
-    --serial=/etc/origin/master/ca.serial.txt
-  delegate_to: "{{ groups.oo_first_master.0 }}"
-  when:
-  - not first_proxy_ca_crt.stat.exists
-  - not first_proxy_ca_key.stat.exists
-
-- name: Check for Aggregator Signer cert
-  stat:
-    path: /etc/origin/master/front-proxy-ca.crt
-  register: proxy_ca_crt
-  changed_when: false
-
-- name: Check for Aggregator Signer key
-  stat:
-    path: /etc/origin/master/front-proxy-ca.crt
-  register: proxy_ca_key
-  changed_when: false
-
-- name: Copy Aggregator Signer certs from first master
-  fetch:
-    src: "/etc/origin/master/{{ item }}"
-    dest: "{{ certtemp.stdout }}/{{ item }}"
-    flat: yes
-  with_items:
-  - front-proxy-ca.crt
-  - front-proxy-ca.key
-  delegate_to: "{{ groups.oo_first_master.0 }}"
-  when:
-  - not proxy_ca_key.stat.exists
-  - not proxy_ca_crt.stat.exists
-
-- name: Copy Aggregator Signer certs to host
-  copy:
-    src: "{{ certtemp.stdout }}/{{ item }}"
-    dest: "/etc/origin/master/{{ item }}"
-  with_items:
-  - front-proxy-ca.crt
-  - front-proxy-ca.key
-  when:
-  - not proxy_ca_key.stat.exists
-  - not proxy_ca_crt.stat.exists
-
-#  oc_adm_ca_server_cert:
-#    cert: /etc/origin/master/front-proxy-ca.crt
-#    key: /etc/origin/master/front-proxy-ca.key
-
-- name: Check for first master api-client config
-  stat:
-    path: /etc/origin/master/aggregator-front-proxy.kubeconfig
-  register: first_front_proxy_kubeconfig
-  delegate_to: "{{ groups.oo_first_master.0 }}"
-  run_once: true
-
-# create-api-client-config generates a ca.crt file which will
-# overwrite the OpenShift CA certificate.  Generate the aggregator
-# kubeconfig in a temporary directory and then copy files into the
-# master config dir to avoid overwriting ca.crt.
-- block:
-  - name: Create first master api-client config for Aggregator
-    command: >
-      {{ hostvars[groups.oo_first_master.0].openshift.common.client_binary }} adm create-api-client-config
-      --certificate-authority=/etc/origin/master/front-proxy-ca.crt
-      --signer-cert=/etc/origin/master/front-proxy-ca.crt
-      --signer-key=/etc/origin/master/front-proxy-ca.key
-      --user aggregator-front-proxy
-      --client-dir={{ certtemp.stdout }}
-      --signer-serial=/etc/origin/master/ca.serial.txt
-    delegate_to: "{{ groups.oo_first_master.0 }}"
-    run_once: true
-  - name: Copy first master api-client config for Aggregator
-    copy:
-      src: "{{ certtemp.stdout }}/{{ item }}"
-      dest: "/etc/origin/master/"
-      remote_src: true
-    with_items:
-    - aggregator-front-proxy.crt
-    - aggregator-front-proxy.key
-    - aggregator-front-proxy.kubeconfig
-    delegate_to: "{{ groups.oo_first_master.0 }}"
-    run_once: true
-  when:
-  - not first_front_proxy_kubeconfig.stat.exists
-
-- name: Check for api-client config
-  stat:
-    path: /etc/origin/master/aggregator-front-proxy.kubeconfig
-  register: front_proxy_kubeconfig
-
-- name: Copy api-client config from first master
-  fetch:
-    src: "/etc/origin/master/{{ item }}"
-    dest: "{{ certtemp.stdout }}/{{ item }}"
-    flat: yes
-  delegate_to: "{{ groups.oo_first_master.0 }}"
-  with_items:
-  - aggregator-front-proxy.crt
-  - aggregator-front-proxy.key
-  - aggregator-front-proxy.kubeconfig
-  when:
-  - not front_proxy_kubeconfig.stat.exists
-
-- name: Copy api-client config to host
-  copy:
-    src: "{{ certtemp.stdout }}/{{ item }}"
-    dest: "/etc/origin/master/{{ item }}"
-  with_items:
-  - aggregator-front-proxy.crt
-  - aggregator-front-proxy.key
-  - aggregator-front-proxy.kubeconfig
-  when:
-  - not front_proxy_kubeconfig.stat.exists
-
-- name: Delete temp directory
-  file:
-    name: "{{ certtemp.stdout }}"
-    state: absent
-  changed_when: False
-
-- name: Setup extension file for service console UI
-  template:
-    src: ../templates/openshift-ansible-catalog-console.js
-    dest: /etc/origin/master/openshift-ansible-catalog-console.js
-
-- name: Update master config
-  yedit:
-    state: present
-    src: /etc/origin/master/master-config.yaml
-    edits:
-    - key: aggregatorConfig.proxyClientInfo.certFile
-      value: aggregator-front-proxy.crt
-    - key: aggregatorConfig.proxyClientInfo.keyFile
-      value: aggregator-front-proxy.key
-    - key: authConfig.requestHeader.clientCA
-      value: front-proxy-ca.crt
-    - key: authConfig.requestHeader.clientCommonNames
-      value: [aggregator-front-proxy]
-    - key: authConfig.requestHeader.usernameHeaders
-      value: [X-Remote-User]
-    - key: authConfig.requestHeader.groupHeaders
-      value: [X-Remote-Group]
-    - key: authConfig.requestHeader.extraHeaderPrefixes
-      value: [X-Remote-Extra-]
-    - key: assetConfig.extensionScripts
-      value: [/etc/origin/master/openshift-ansible-catalog-console.js]
-    - key: kubernetesMasterConfig.apiServerArguments.runtime-config
-      value: [apis/settings.k8s.io/v1alpha1=true]
-    - key: admissionConfig.pluginConfig.PodPreset.configuration.kind
-      value: DefaultAdmissionConfig
-    - key: admissionConfig.pluginConfig.PodPreset.configuration.apiVersion
-      value: v1
-    - key: admissionConfig.pluginConfig.PodPreset.configuration.disable
-      value: false
-  register: yedit_output
-
-#restart master serially here
-- name: restart master api
-  systemd: name={{ openshift.common.service_type }}-master-api state=restarted
-  when:
-  - yedit_output.changed
-  - openshift.master.cluster_method == 'native'
-
-# We retry the controllers because the API may not be 100% initialized yet.
-- name: restart master controllers
-  command: "systemctl restart {{ openshift.common.service_type }}-master-controllers"
-  retries: 3
-  delay: 5
-  register: result
-  until: result.rc == 0
-  when:
-  - yedit_output.changed
-  - openshift.master.cluster_method == 'native'
-
-- name: Verify API Server
-  # Using curl here since the uri module requires python-httplib2 and
-  # wait_for port doesn't provide health information.
-  command: >
-    curl --silent --tlsv1.2
-    --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
-    {{ openshift.master.api_url }}/healthz/ready
-  args:
-    # Disables the following warning:
-    # Consider using get_url or uri module rather than running curl
-    warn: no
-  register: api_available_output
-  until: api_available_output.stdout == 'ok'
-  retries: 120
-  delay: 1
-  changed_when: false
-  when:
-  - yedit_output.changed
diff --git a/playbooks/common/openshift-master/templates/openshift-ansible-catalog-console.js b/playbooks/common/openshift-master/templates/openshift-ansible-catalog-console.js
deleted file mode 100644
index fd02325ba..000000000
--- a/playbooks/common/openshift-master/templates/openshift-ansible-catalog-console.js
+++ /dev/null
@@ -1 +0,0 @@
-window.OPENSHIFT_CONSTANTS.TEMPLATE_SERVICE_BROKER_ENABLED = {{ 'true' if (template_service_broker_install | default(True)) else 'false' }};
diff --git a/playbooks/common/openshift-master/validate_restart.yml b/playbooks/common/openshift-master/validate_restart.yml
deleted file mode 100644
index 5dbb21502..000000000
--- a/playbooks/common/openshift-master/validate_restart.yml
+++ /dev/null
@@ -1,65 +0,0 @@
----
-- name: Validate configuration for rolling restart
-  hosts: oo_masters_to_config
-  roles:
-  - openshift_facts
-  tasks:
-  - fail:
-      msg: "openshift_rolling_restart_mode must be set to either 'services' or 'system'"
-    when: openshift_rolling_restart_mode is defined and openshift_rolling_restart_mode not in ["services", "system"]
-  - openshift_facts:
-      role: "{{ item.role }}"
-      local_facts: "{{ item.local_facts }}"
-    with_items:
-    - role: common
-      local_facts:
-        rolling_restart_mode: "{{ openshift_rolling_restart_mode | default('services') }}"
-    - role: master
-      local_facts:
-        cluster_method: "{{ openshift_master_cluster_method | default(None) }}"
-
-# Creating a temp file on localhost, we then check each system that will
-# be rebooted to see if that file exists, if so we know we're running
-# ansible on a machine that needs a reboot, and we need to error out.
-- name: Create temp file on localhost
-  hosts: localhost
-  connection: local
-  become: no
-  gather_facts: no
-  tasks:
-  - local_action: command mktemp
-    register: mktemp
-    changed_when: false
-
-- name: Check if temp file exists on any masters
-  hosts: oo_masters_to_config
-  tasks:
-  - stat: path="{{ hostvars.localhost.mktemp.stdout }}"
-    register: exists
-    changed_when: false
-
-- name: Cleanup temp file on localhost
-  hosts: localhost
-  connection: local
-  become: no
-  gather_facts: no
-  tasks:
-  - file: path="{{ hostvars.localhost.mktemp.stdout }}" state=absent
-    changed_when: false
-
-- name: Warn if restarting the system where ansible is running
-  hosts: oo_masters_to_config
-  tasks:
-  - pause:
-      prompt: >
-        Warning: Running playbook from a host that will be restarted!
-        Press CTRL+C and A to abort playbook execution. You may
-        continue by pressing ENTER but the playbook will stop
-        executing after this system has been restarted and services
-        must be verified manually. To only restart services, set
-        openshift_master_rolling_restart_mode=services in host
-        inventory and relaunch the playbook.
-    when: exists.stat.exists and openshift.common.rolling_restart_mode == 'system'
-  - set_fact:
-      current_host: "{{ exists.stat.exists }}"
-    when: openshift.common.rolling_restart_mode == 'system'