Diffstat (limited to 'roles')
-rw-r--r--  roles/cockpit/tasks/main.yml | 3
-rw-r--r--  roles/contiv/tasks/main.yml | 6
-rw-r--r--  roles/contiv/tasks/netmaster.yml | 8
-rw-r--r--  roles/contiv/tasks/netplugin.yml | 6
-rw-r--r--  roles/contiv/tasks/ovs.yml | 2
-rw-r--r--  roles/contiv/tasks/packageManagerInstall.yml | 2
-rw-r--r--  roles/contiv_facts/tasks/main.yml | 4
-rw-r--r--  roles/docker/tasks/main.yml | 8
-rw-r--r--  roles/docker/tasks/package_docker.yml | 7
-rw-r--r--  roles/docker/tasks/systemcontainer_crio.yml | 4
-rw-r--r--  roles/docker/tasks/systemcontainer_docker.yml | 2
-rw-r--r--  roles/etcd/defaults/main.yaml | 10
-rw-r--r--  roles/etcd/tasks/backup.archive.yml | 4
-rw-r--r--  roles/etcd/tasks/backup.copy.yml | 4
-rw-r--r--  roles/etcd/tasks/backup.fetch.yml | 4
-rw-r--r--  roles/etcd/tasks/backup.force_new_cluster.yml | 4
-rw-r--r--  roles/etcd/tasks/backup.unarchive.yml | 4
-rw-r--r--  roles/etcd/tasks/backup.yml | 2
-rw-r--r--  roles/etcd/tasks/backup/backup.yml | 2
-rw-r--r--  roles/etcd/tasks/backup_ca_certificates.yml | 2
-rw-r--r--  roles/etcd/tasks/backup_generated_certificates.yml | 2
-rw-r--r--  roles/etcd/tasks/backup_master_etcd_certificates.yml | 2
-rw-r--r--  roles/etcd/tasks/backup_server_certificates.yml | 2
-rw-r--r--  roles/etcd/tasks/ca.yml | 2
-rw-r--r--  roles/etcd/tasks/check_cluster_health.yml | 2
-rw-r--r--  roles/etcd/tasks/clean_data.yml | 2
-rw-r--r--  roles/etcd/tasks/client_certificates.yml | 2
-rw-r--r--  roles/etcd/tasks/disable_etcd.yml | 2
-rw-r--r--  roles/etcd/tasks/distribute_ca | 2
-rw-r--r--  roles/etcd/tasks/distribute_ca.yml | 2
-rw-r--r--  roles/etcd/tasks/drop_etcdctl.yml | 2
-rw-r--r--  roles/etcd/tasks/fetch_backup.yml | 8
-rw-r--r--  roles/etcd/tasks/main.yml | 17
-rw-r--r--  roles/etcd/tasks/migrate.add_ttls.yml | 2
-rw-r--r--  roles/etcd/tasks/migrate.configure_master.yml | 2
-rw-r--r--  roles/etcd/tasks/migrate.pre_check.yml | 2
-rw-r--r--  roles/etcd/tasks/migrate.yml | 2
-rw-r--r--  roles/etcd/tasks/migration/check.yml | 8
-rw-r--r--  roles/etcd/tasks/remove_ca_certificates.yml | 2
-rw-r--r--  roles/etcd/tasks/remove_generated_certificates.yml | 2
-rw-r--r--  roles/etcd/tasks/restart.yml | 21
-rw-r--r--  roles/etcd/tasks/retrieve_ca_certificates.yml | 2
-rw-r--r--  roles/etcd/tasks/server_certificates.yml | 4
-rw-r--r--  roles/etcd/tasks/system_container.yml | 4
-rw-r--r--  roles/etcd/tasks/upgrade_image.yml | 2
-rw-r--r--  roles/etcd/tasks/upgrade_rpm.yml | 2
-rw-r--r--  roles/etcd/tasks/version_detect.yml | 55
-rw-r--r--  roles/etcd/templates/etcd.docker.service | 2
-rw-r--r--  roles/installer_checkpoint/README.md | 6
-rw-r--r--  roles/installer_checkpoint/callback_plugins/installer_checkpoint.py | 24
-rw-r--r--  roles/kuryr/tasks/master.yaml | 4
-rw-r--r--  roles/nuage_master/tasks/main.yaml | 7
-rw-r--r--  roles/nuage_node/tasks/main.yaml | 7
-rw-r--r--  roles/openshift_certificate_expiry/README.md | 48
-rw-r--r--  roles/openshift_cli/defaults/main.yml | 5
-rw-r--r--  roles/openshift_cli/tasks/main.yml | 4
-rw-r--r--  roles/openshift_cloud_provider/tasks/main.yml | 6
-rw-r--r--  roles/openshift_etcd_facts/tasks/main.yml | 4
-rw-r--r--  roles/openshift_excluder/tasks/disable.yml | 10
-rw-r--r--  roles/openshift_excluder/tasks/enable.yml | 4
-rw-r--r--  roles/openshift_excluder/tasks/main.yml | 2
-rw-r--r--  roles/openshift_excluder/tasks/verify_upgrade.yml | 4
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 7
-rw-r--r--  roles/openshift_hosted/tasks/storage/glusterfs.yml | 11
-rw-r--r--  roles/openshift_loadbalancer/tasks/main.yml | 3
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 | 2
-rw-r--r--  roles/openshift_logging_fluentd/files/secure-forward.conf | 2
-rw-r--r--  roles/openshift_logging_mux/files/secure-forward.conf | 2
-rw-r--r--  roles/openshift_logging_mux/templates/mux.j2 | 2
-rw-r--r--  roles/openshift_master/defaults/main.yml | 6
-rw-r--r--  roles/openshift_master/tasks/main.yml | 17
-rw-r--r--  roles/openshift_master/tasks/system_container.yml | 6
-rw-r--r--  roles/openshift_master/tasks/systemd_units.yml | 2
-rw-r--r--  roles/openshift_master/tasks/upgrade.yml | 10
-rw-r--r--  roles/openshift_master_cluster/tasks/main.yml | 2
-rw-r--r--  roles/openshift_metrics/tasks/generate_certificates.yaml | 2
-rw-r--r--  roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml | 4
-rw-r--r--  roles/openshift_metrics/tasks/install_heapster.yaml | 2
-rw-r--r--  roles/openshift_metrics/tasks/install_metrics.yaml | 18
-rw-r--r--  roles/openshift_metrics/tasks/install_support.yaml | 8
-rw-r--r--  roles/openshift_metrics/tasks/main.yaml | 6
-rw-r--r--  roles/openshift_metrics/tasks/uninstall_metrics.yaml | 2
-rw-r--r--  roles/openshift_node/defaults/main.yml | 9
-rw-r--r--  roles/openshift_node/tasks/bootstrap.yml | 3
-rw-r--r--  roles/openshift_node/tasks/config.yml | 5
-rw-r--r--  roles/openshift_node/tasks/install.yml | 2
-rw-r--r--  roles/openshift_node/tasks/main.yml | 21
-rw-r--r--  roles/openshift_node/tasks/node_system_container.yml | 4
-rw-r--r--  roles/openshift_node/tasks/openvswitch_system_container.yml | 4
-rw-r--r--  roles/openshift_node/tasks/systemd_units.yml | 22
-rw-r--r--  roles/openshift_node/tasks/upgrade.yml | 12
-rw-r--r--  roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml | 2
-rw-r--r--  roles/openshift_node_dnsmasq/tasks/main.yml | 4
-rw-r--r--  roles/openshift_node_dnsmasq/tasks/no-network-manager.yml | 2
-rw-r--r--  roles/openshift_node_group/defaults/main.yml | 2
-rw-r--r--  roles/openshift_node_group/tasks/main.yml | 2
-rw-r--r--  roles/openshift_node_group/templates/node-config.yaml.j2 | 2
-rw-r--r--  roles/openshift_prometheus/README.md | 7
-rw-r--r--  roles/openshift_prometheus/defaults/main.yaml | 3
-rw-r--r--  roles/openshift_prometheus/tasks/install_prometheus.yaml | 22
-rw-r--r--  roles/openshift_prometheus/tasks/main.yaml | 8
-rw-r--r--  roles/openshift_prometheus/tasks/uninstall_prometheus.yaml | 7
-rw-r--r--  roles/openshift_prometheus/templates/prometheus.j2 | 5
-rw-r--r--  roles/openshift_provisioners/tasks/install_provisioners.yaml | 8
-rw-r--r--  roles/openshift_provisioners/tasks/install_support.yaml | 6
-rw-r--r--  roles/openshift_provisioners/tasks/main.yaml | 4
-rw-r--r--  roles/openshift_provisioners/tasks/uninstall_provisioners.yaml | 2
-rw-r--r--  roles/openshift_repos/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_sanitize_inventory/tasks/deprecations.yml | 2
-rw-r--r--  roles/openshift_sanitize_inventory/tasks/main.yml | 4
-rw-r--r--  roles/openshift_storage_glusterfs/README.md | 4
-rw-r--r--  roles/openshift_storage_glusterfs/defaults/main.yml | 8
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.7/glusterblock-provisioner.yml | 2
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.8/deploy-heketi-template.yml | 133
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-pvcs-template.yml | 67
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-template.yml | 140
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.8/glusterblock-provisioner.yml | 105
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.8/glusterfs-template.yml | 154
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.8/heketi-template.yml | 136
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml | 8
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml | 4
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml | 2
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml | 4
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2 | 8
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-endpoints.yml.j2 | 12
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-service.yml.j2 | 10
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2 | 13
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/heketi-endpoints.yml.j2 | 12
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/heketi-service.yml.j2 | 10
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/heketi.json.j2 | 42
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v3.8/topology.json.j2 | 49
-rw-r--r--  roles/openshift_storage_nfs/tasks/main.yml | 3
-rw-r--r--  roles/openshift_version/tasks/main.yml | 4
-rw-r--r--  roles/os_firewall/tasks/main.yml | 4
135 files changed, 1306 insertions(+), 302 deletions(-)
diff --git a/roles/cockpit/tasks/main.yml b/roles/cockpit/tasks/main.yml
index 066ee3f3b..34754502a 100644
--- a/roles/cockpit/tasks/main.yml
+++ b/roles/cockpit/tasks/main.yml
@@ -1,7 +1,6 @@
---
- name: setup firewall
- include: firewall.yml
- static: yes
+ import_tasks: firewall.yml
- name: Install cockpit-ws
package: name={{ item }} state=present
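The pattern above repeats throughout this commit: the deprecated bare `include` is replaced with the explicit forms introduced in Ansible 2.4, namely `import_tasks` where the include carried `static: yes`, and `include_tasks` everywhere else. A minimal sketch of the semantic difference (file and variable names are hypothetical):

```
# import_tasks is static: the file is inlined at parse time, much like
# the old `include` + `static: yes` pair it replaces here.
- name: setup firewall
  import_tasks: firewall.yml

# include_tasks is dynamic: the file is loaded at run time, so it can be
# skipped cheaply with `when` or take a templated filename.
- name: Run the requested action
  include_tasks: "{{ action_name }}.yml"  # action_name is hypothetical
```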
diff --git a/roles/contiv/tasks/main.yml b/roles/contiv/tasks/main.yml
index 40a0f9e61..cb9196a71 100644
--- a/roles/contiv/tasks/main.yml
+++ b/roles/contiv/tasks/main.yml
@@ -5,10 +5,10 @@
recurse: yes
state: directory
-- include: download_bins.yml
+- include_tasks: download_bins.yml
-- include: netmaster.yml
+- include_tasks: netmaster.yml
when: contiv_role == "netmaster"
-- include: netplugin.yml
+- include_tasks: netplugin.yml
when: contiv_role == "netplugin"
diff --git a/roles/contiv/tasks/netmaster.yml b/roles/contiv/tasks/netmaster.yml
index cc52d3a43..6f15af8c2 100644
--- a/roles/contiv/tasks/netmaster.yml
+++ b/roles/contiv/tasks/netmaster.yml
@@ -1,8 +1,8 @@
---
-- include: netmaster_firewalld.yml
+- include_tasks: netmaster_firewalld.yml
when: has_firewalld
-- include: netmaster_iptables.yml
+- include_tasks: netmaster_iptables.yml
when: not has_firewalld and has_iptables
- name: Netmaster | Check if /etc/hosts file exists
@@ -70,8 +70,8 @@
state: started
register: netmaster_started
-- include: aci.yml
+- include_tasks: aci.yml
when: contiv_fabric_mode == "aci"
-- include: default_network.yml
+- include_tasks: default_network.yml
when: contiv_default_network == true
diff --git a/roles/contiv/tasks/netplugin.yml b/roles/contiv/tasks/netplugin.yml
index e861a2591..0b2f91bab 100644
--- a/roles/contiv/tasks/netplugin.yml
+++ b/roles/contiv/tasks/netplugin.yml
@@ -1,8 +1,8 @@
---
-- include: netplugin_firewalld.yml
+- include_tasks: netplugin_firewalld.yml
when: has_firewalld
-- include: netplugin_iptables.yml
+- include_tasks: netplugin_iptables.yml
when: has_iptables
- name: Netplugin | Ensure localhost entry correct in /etc/hosts
@@ -19,7 +19,7 @@
line: '::1 '
state: absent
-- include: ovs.yml
+- include_tasks: ovs.yml
when: netplugin_driver == "ovs"
- name: Netplugin | Create Netplugin bin symlink
diff --git a/roles/contiv/tasks/ovs.yml b/roles/contiv/tasks/ovs.yml
index 0c1b994c7..5c92e90e9 100644
--- a/roles/contiv/tasks/ovs.yml
+++ b/roles/contiv/tasks/ovs.yml
@@ -1,5 +1,5 @@
---
-- include: packageManagerInstall.yml
+- include_tasks: packageManagerInstall.yml
when: source_type == "packageManager"
tags:
- binary-update
diff --git a/roles/contiv/tasks/packageManagerInstall.yml b/roles/contiv/tasks/packageManagerInstall.yml
index e0d48e643..d5726476c 100644
--- a/roles/contiv/tasks/packageManagerInstall.yml
+++ b/roles/contiv/tasks/packageManagerInstall.yml
@@ -3,7 +3,7 @@
set_fact:
did_install: false
-- include: pkgMgrInstallers/centos-install.yml
+- include_tasks: pkgMgrInstallers/centos-install.yml
when: (ansible_os_family == "RedHat") and
not is_atomic
diff --git a/roles/contiv_facts/tasks/main.yml b/roles/contiv_facts/tasks/main.yml
index 7a4972fca..3267a4ab0 100644
--- a/roles/contiv_facts/tasks/main.yml
+++ b/roles/contiv_facts/tasks/main.yml
@@ -81,8 +81,8 @@
has_iptables: false
# collect information about what packages are installed
-- include: rpm.yml
+- include_tasks: rpm.yml
when: has_rpm
-- include: fedora-install.yml
+- include_tasks: fedora-install.yml
when: not is_atomic and ansible_distribution == "Fedora"
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index 1050de0fb..b02a74711 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -2,7 +2,7 @@
# These tasks dispatch to the proper set of docker tasks based on the
# inventory:openshift_docker_use_system_container variable
-- include: udev_workaround.yml
+- include_tasks: udev_workaround.yml
when: docker_udev_workaround | default(False) | bool
- set_fact:
@@ -20,7 +20,7 @@
- not l_use_crio_only
- name: Use Package Docker if Requested
- include: package_docker.yml
+ include_tasks: package_docker.yml
when:
- not l_use_system_container
- not l_use_crio_only
@@ -35,13 +35,13 @@
changed_when: false
- name: Use System Container Docker if Requested
- include: systemcontainer_docker.yml
+ include_tasks: systemcontainer_docker.yml
when:
- l_use_system_container
- not l_use_crio_only
- name: Add CRI-O usage Requested
- include: systemcontainer_crio.yml
+ include_tasks: systemcontainer_crio.yml
when:
- l_use_crio
- openshift_docker_is_node_or_master | bool
diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml
index 8121163a6..5437275a2 100644
--- a/roles/docker/tasks/package_docker.yml
+++ b/roles/docker/tasks/package_docker.yml
@@ -1,6 +1,6 @@
---
- name: Get current installed Docker version
- command: "{{ repoquery_cmd }} --installed --qf '%{version}' docker"
+ command: "{{ repoquery_installed }} --qf '%{version}' docker"
when: not openshift.common.is_atomic | bool
register: curr_docker_version
retries: 4
@@ -33,9 +33,10 @@
# Make sure Docker is installed, but does not update a running version.
# Docker upgrades are handled by a separate playbook.
+# Note: The curr_docker_version.stdout check can be removed when https://github.com/ansible/ansible/issues/33187 gets fixed.
- name: Install Docker
package: name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present
- when: not openshift.common.is_atomic | bool
+ when: not openshift.common.is_atomic | bool and not curr_docker_version | skipped and not curr_docker_version.stdout != ''
- block:
# Extend the default Docker service unit file when using iptables-services
@@ -157,4 +158,4 @@
- meta: flush_handlers
# This needs to run after docker is restarted to account for proxy settings.
-- include: registry_auth.yml
+- include_tasks: registry_auth.yml
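The new `when` on "Install Docker" only fires when the version probe actually ran and came back empty. Because a variable registered by a skipped task still exists (just without `stdout`), the condition has to test `| skipped` first; the linked ansible/ansible#33187 note explains why the extra `stdout` check is temporary. A minimal sketch of the pattern, with hypothetical names:

```
- name: Probe for an installed version (skipped on Atomic hosts)
  command: repoquery --installed --qf '%{version}' docker
  register: curr_version
  failed_when: false
  changed_when: false
  when: not host_is_atomic | bool  # host_is_atomic is hypothetical

- name: Install only when the probe ran and found nothing
  package:
    name: docker
    state: present
  when: not curr_version | skipped and curr_version.stdout == ''
```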
diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml
index 3fe10454d..3e5bdf32c 100644
--- a/roles/docker/tasks/systemcontainer_crio.yml
+++ b/roles/docker/tasks/systemcontainer_crio.yml
@@ -162,7 +162,7 @@
state: directory
- name: setup firewall for CRI-O
- include: crio_firewall.yml
+ include_tasks: crio_firewall.yml
static: yes
- name: Configure the CNI network
@@ -182,6 +182,6 @@
# If we are using crio only, docker.service might not be available for
# 'docker login'
-- include: registry_auth.yml
+- include_tasks: registry_auth.yml
vars:
openshift_docker_alternative_creds: "{{ l_use_crio_only }}"
diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml
index 84220fa66..f69acb9a5 100644
--- a/roles/docker/tasks/systemcontainer_docker.yml
+++ b/roles/docker/tasks/systemcontainer_docker.yml
@@ -177,6 +177,6 @@
# Since docker is running as a system container, docker login will fail to create
# credentials. Use alternate method if requiring authenticated registries.
-- include: registry_auth.yml
+- include_tasks: registry_auth.yml
vars:
openshift_docker_alternative_creds: True
diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml
index 4b734d4ed..a069e4d87 100644
--- a/roles/etcd/defaults/main.yaml
+++ b/roles/etcd/defaults/main.yaml
@@ -2,10 +2,18 @@
r_etcd_common_backup_tag: ''
r_etcd_common_backup_sufix_name: ''
+l_is_etcd_system_container: "{{ (openshift_use_etcd_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+
# runc, docker, host
-r_etcd_common_etcd_runtime: "docker"
+r_etcd_common_etcd_runtime: "{{ 'runc' if l_is_etcd_system_container else 'docker' if l_is_containerized else 'host' }}"
r_etcd_common_embedded_etcd: false
+osm_etcd_image: 'registry.access.redhat.com/rhel7/etcd'
+etcd_image_dict:
+ origin: "registry.fedoraproject.org/f26/etcd"
+ openshift-enterprise: "{{ osm_etcd_image }}"
+etcd_image: "{{ etcd_image_dict[openshift_deployment_type | default('origin')] }}"
+
# etcd run on a host => use etcdctl command directly
# etcd run as a docker container => use docker exec
# etcd run as a runc container => use runc exec
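With `etcd_image` dropped from `openshift_facts` (see the openshift_facts.py hunk below), the image and runtime are now derived entirely in role defaults: a dict keyed on deployment type picks the image, and a chained inline-if picks the runtime. A self-contained sketch of how the chained expression evaluates:

```
- hosts: localhost
  gather_facts: no
  vars:
    l_is_etcd_system_container: false
    l_is_containerized: true
  tasks:
    - debug:
        msg: "{{ 'runc' if l_is_etcd_system_container else 'docker' if l_is_containerized else 'host' }}"
        # -> "docker": each `if` is tried left to right, 'host' is the fallback
```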
diff --git a/roles/etcd/tasks/backup.archive.yml b/roles/etcd/tasks/backup.archive.yml
index 6daa6dc51..a29a90ea3 100644
--- a/roles/etcd/tasks/backup.archive.yml
+++ b/roles/etcd/tasks/backup.archive.yml
@@ -1,3 +1,3 @@
---
-- include: backup/vars.yml
-- include: backup/archive.yml
+- include_tasks: backup/vars.yml
+- include_tasks: backup/archive.yml
diff --git a/roles/etcd/tasks/backup.copy.yml b/roles/etcd/tasks/backup.copy.yml
index cc540cbca..6e8502e3b 100644
--- a/roles/etcd/tasks/backup.copy.yml
+++ b/roles/etcd/tasks/backup.copy.yml
@@ -1,3 +1,3 @@
---
-- include: backup/vars.yml
-- include: backup/copy.yml
+- include_tasks: backup/vars.yml
+- include_tasks: backup/copy.yml
diff --git a/roles/etcd/tasks/backup.fetch.yml b/roles/etcd/tasks/backup.fetch.yml
index 26ec15043..d33878804 100644
--- a/roles/etcd/tasks/backup.fetch.yml
+++ b/roles/etcd/tasks/backup.fetch.yml
@@ -1,3 +1,3 @@
---
-- include: backup/vars.yml
-- include: backup/fetch.yml
+- include_tasks: backup/vars.yml
+- include_tasks: backup/fetch.yml
diff --git a/roles/etcd/tasks/backup.force_new_cluster.yml b/roles/etcd/tasks/backup.force_new_cluster.yml
index d2e866416..7dd0899ee 100644
--- a/roles/etcd/tasks/backup.force_new_cluster.yml
+++ b/roles/etcd/tasks/backup.force_new_cluster.yml
@@ -1,5 +1,5 @@
---
-- include: backup/vars.yml
+- include_tasks: backup/vars.yml
- name: Move content of etcd backup under the etcd data directory
command: >
@@ -9,4 +9,4 @@
command: >
chown -R etcd:etcd "{{ etcd_data_dir }}"
-- include: auxiliary/force_new_cluster.yml
+- include_tasks: auxiliary/force_new_cluster.yml
diff --git a/roles/etcd/tasks/backup.unarchive.yml b/roles/etcd/tasks/backup.unarchive.yml
index 77a637360..f92e87c3d 100644
--- a/roles/etcd/tasks/backup.unarchive.yml
+++ b/roles/etcd/tasks/backup.unarchive.yml
@@ -1,3 +1,3 @@
---
-- include: backup/vars.yml
-- include: backup/unarchive.yml
+- include_tasks: backup/vars.yml
+- include_tasks: backup/unarchive.yml
diff --git a/roles/etcd/tasks/backup.yml b/roles/etcd/tasks/backup.yml
index c0538e596..60bb82100 100644
--- a/roles/etcd/tasks/backup.yml
+++ b/roles/etcd/tasks/backup.yml
@@ -1,2 +1,2 @@
---
-- include: backup/backup.yml
+- include_tasks: backup/backup.yml
diff --git a/roles/etcd/tasks/backup/backup.yml b/roles/etcd/tasks/backup/backup.yml
index ca0d29155..afb84eb58 100644
--- a/roles/etcd/tasks/backup/backup.yml
+++ b/roles/etcd/tasks/backup/backup.yml
@@ -1,5 +1,5 @@
---
-- include: vars.yml
+- include_tasks: vars.yml
# TODO: replace shell module with command and update later checks
- name: Check available disk space for etcd backup
diff --git a/roles/etcd/tasks/backup_ca_certificates.yml b/roles/etcd/tasks/backup_ca_certificates.yml
index a41b032f3..c87359900 100644
--- a/roles/etcd/tasks/backup_ca_certificates.yml
+++ b/roles/etcd/tasks/backup_ca_certificates.yml
@@ -1,2 +1,2 @@
---
-- include: certificates/backup_ca_certificates.yml
+- include_tasks: certificates/backup_ca_certificates.yml
diff --git a/roles/etcd/tasks/backup_generated_certificates.yml b/roles/etcd/tasks/backup_generated_certificates.yml
index 8cf2a10cc..fa73ea590 100644
--- a/roles/etcd/tasks/backup_generated_certificates.yml
+++ b/roles/etcd/tasks/backup_generated_certificates.yml
@@ -1,2 +1,2 @@
---
-- include: certificates/backup_generated_certificates.yml
+- include_tasks: certificates/backup_generated_certificates.yml
diff --git a/roles/etcd/tasks/backup_master_etcd_certificates.yml b/roles/etcd/tasks/backup_master_etcd_certificates.yml
index 129e1831c..5526825fa 100644
--- a/roles/etcd/tasks/backup_master_etcd_certificates.yml
+++ b/roles/etcd/tasks/backup_master_etcd_certificates.yml
@@ -1,2 +1,2 @@
---
-- include: certificates/backup_master_etcd_certificates.yml
+- include_tasks: certificates/backup_master_etcd_certificates.yml
diff --git a/roles/etcd/tasks/backup_server_certificates.yml b/roles/etcd/tasks/backup_server_certificates.yml
index 267ffeb4d..5f3052be1 100644
--- a/roles/etcd/tasks/backup_server_certificates.yml
+++ b/roles/etcd/tasks/backup_server_certificates.yml
@@ -1,2 +1,2 @@
---
-- include: certificates/backup_server_certificates.yml
+- include_tasks: certificates/backup_server_certificates.yml
diff --git a/roles/etcd/tasks/ca.yml b/roles/etcd/tasks/ca.yml
index cca1e9ad7..dd4b59e24 100644
--- a/roles/etcd/tasks/ca.yml
+++ b/roles/etcd/tasks/ca.yml
@@ -1,2 +1,2 @@
---
-- include: certificates/deploy_ca.yml
+- include_tasks: certificates/deploy_ca.yml
diff --git a/roles/etcd/tasks/check_cluster_health.yml b/roles/etcd/tasks/check_cluster_health.yml
index 75c110972..3410528eb 100644
--- a/roles/etcd/tasks/check_cluster_health.yml
+++ b/roles/etcd/tasks/check_cluster_health.yml
@@ -1,2 +1,2 @@
---
-- include: migration/check_cluster_health.yml
+- include_tasks: migration/check_cluster_health.yml
diff --git a/roles/etcd/tasks/clean_data.yml b/roles/etcd/tasks/clean_data.yml
index d131ffd21..12538c2d0 100644
--- a/roles/etcd/tasks/clean_data.yml
+++ b/roles/etcd/tasks/clean_data.yml
@@ -1,2 +1,2 @@
---
-- include: auxiliary/clean_data.yml
+- include_tasks: auxiliary/clean_data.yml
diff --git a/roles/etcd/tasks/client_certificates.yml b/roles/etcd/tasks/client_certificates.yml
index 2f4108a0d..f3201816d 100644
--- a/roles/etcd/tasks/client_certificates.yml
+++ b/roles/etcd/tasks/client_certificates.yml
@@ -1,2 +1,2 @@
---
-- include: certificates/fetch_client_certificates_from_ca.yml
+- include_tasks: certificates/fetch_client_certificates_from_ca.yml
diff --git a/roles/etcd/tasks/disable_etcd.yml b/roles/etcd/tasks/disable_etcd.yml
index 9202e6e48..55fb7f6ea 100644
--- a/roles/etcd/tasks/disable_etcd.yml
+++ b/roles/etcd/tasks/disable_etcd.yml
@@ -1,2 +1,2 @@
---
-- include: auxiliary/disable_etcd.yml
+- include_tasks: auxiliary/disable_etcd.yml
diff --git a/roles/etcd/tasks/distribute_ca b/roles/etcd/tasks/distribute_ca
deleted file mode 100644
index 040c5f7af..000000000
--- a/roles/etcd/tasks/distribute_ca
+++ /dev/null
@@ -1,2 +0,0 @@
----
-- include: certificates/distribute_ca.yml
diff --git a/roles/etcd/tasks/distribute_ca.yml b/roles/etcd/tasks/distribute_ca.yml
new file mode 100644
index 000000000..7d2607844
--- /dev/null
+++ b/roles/etcd/tasks/distribute_ca.yml
@@ -0,0 +1,2 @@
+---
+- include_tasks: certificates/distribute_ca.yml
diff --git a/roles/etcd/tasks/drop_etcdctl.yml b/roles/etcd/tasks/drop_etcdctl.yml
index 4c1f609f7..3258ab1a8 100644
--- a/roles/etcd/tasks/drop_etcdctl.yml
+++ b/roles/etcd/tasks/drop_etcdctl.yml
@@ -1,2 +1,2 @@
---
-- include: auxiliary/drop_etcdctl.yml
+- include_tasks: auxiliary/drop_etcdctl.yml
diff --git a/roles/etcd/tasks/fetch_backup.yml b/roles/etcd/tasks/fetch_backup.yml
index 513eed17a..a28db3d66 100644
--- a/roles/etcd/tasks/fetch_backup.yml
+++ b/roles/etcd/tasks/fetch_backup.yml
@@ -1,8 +1,8 @@
---
-- include: backup/vars.yml
+- include_tasks: backup/vars.yml
-- include: backup/archive.yml
+- include_tasks: backup/archive.yml
-- include: backup/sync_backup.yml
+- include_tasks: backup/sync_backup.yml
-- include: backup/
+- include_tasks: backup/
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index 3e69af314..5ee9335f5 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -7,20 +7,19 @@
etcd_ip: "{{ etcd_ip }}"
- name: setup firewall
- include: firewall.yml
- static: yes
+ import_tasks: firewall.yml
- name: Install etcd
package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present
when: not etcd_is_containerized | bool
-- include: drop_etcdctl.yml
+- include_tasks: drop_etcdctl.yml
when:
- openshift_etcd_etcdctl_profile | default(true) | bool
- block:
- name: Pull etcd container
- command: docker pull {{ openshift.etcd.etcd_image }}
+ command: docker pull {{ etcd_image }}
register: pull_result
changed_when: "'Downloaded newer image' in pull_result.stdout"
@@ -30,7 +29,7 @@
src: etcd.docker.service
when:
- etcd_is_containerized | bool
- - not openshift.common.is_etcd_system_container | bool
+ - not l_is_etcd_system_container | bool
# Start secondary etcd instance for third party integrations
# TODO: Determine an alternative to using thirdparty variable
@@ -90,7 +89,7 @@
enabled: no
masked: yes
daemon_reload: yes
- when: not openshift.common.is_etcd_system_container | bool
+ when: not l_is_etcd_system_container | bool
register: task_result
failed_when: task_result|failed and 'could not' not in task_result.msg|lower
@@ -98,11 +97,11 @@
template:
dest: "/etc/systemd/system/etcd_container.service"
src: etcd.docker.service
- when: not openshift.common.is_etcd_system_container | bool
+ when: not l_is_etcd_system_container | bool
- name: Install Etcd system container
- include: system_container.yml
- when: openshift.common.is_etcd_system_container | bool
+ include_tasks: system_container.yml
+ when: l_is_etcd_system_container | bool
when: etcd_is_containerized | bool
- name: Validate permissions on the config dir
diff --git a/roles/etcd/tasks/migrate.add_ttls.yml b/roles/etcd/tasks/migrate.add_ttls.yml
index bc27e4ea1..1dd3c9269 100644
--- a/roles/etcd/tasks/migrate.add_ttls.yml
+++ b/roles/etcd/tasks/migrate.add_ttls.yml
@@ -1,2 +1,2 @@
---
-- include: migration/add_ttls.yml
+- include_tasks: migration/add_ttls.yml
diff --git a/roles/etcd/tasks/migrate.configure_master.yml b/roles/etcd/tasks/migrate.configure_master.yml
index 3ada6e362..5be9cebd7 100644
--- a/roles/etcd/tasks/migrate.configure_master.yml
+++ b/roles/etcd/tasks/migrate.configure_master.yml
@@ -1,2 +1,2 @@
---
-- include: migration/configure_master.yml
+- include_tasks: migration/configure_master.yml
diff --git a/roles/etcd/tasks/migrate.pre_check.yml b/roles/etcd/tasks/migrate.pre_check.yml
index 124d21561..4cb67d322 100644
--- a/roles/etcd/tasks/migrate.pre_check.yml
+++ b/roles/etcd/tasks/migrate.pre_check.yml
@@ -1,2 +1,2 @@
---
-- include: migration/check.yml
+- include_tasks: migration/check.yml
diff --git a/roles/etcd/tasks/migrate.yml b/roles/etcd/tasks/migrate.yml
index 5d5385873..1a75f63f1 100644
--- a/roles/etcd/tasks/migrate.yml
+++ b/roles/etcd/tasks/migrate.yml
@@ -1,2 +1,2 @@
---
-- include: migration/migrate.yml
+- include_tasks: migration/migrate.yml
diff --git a/roles/etcd/tasks/migration/check.yml b/roles/etcd/tasks/migration/check.yml
index 5c45e5ae1..8ef81da28 100644
--- a/roles/etcd/tasks/migration/check.yml
+++ b/roles/etcd/tasks/migration/check.yml
@@ -1,7 +1,7 @@
---
# Check the cluster is healthy
-- include: check_cluster_health.yml
+- include_tasks: check_cluster_health.yml
# Check if there is at least one v2 snapshot
- name: Check if there is at least one v2 snapshot
@@ -39,7 +39,7 @@
# - with_items not supported over block
# Check the cluster status for the first time
-- include: check_cluster_status.yml
+- include_tasks: check_cluster_status.yml
# Check the cluster status for the second time
- block:
@@ -50,7 +50,7 @@
seconds: 5
when: not l_etcd_cluster_status_ok | bool
- - include: check_cluster_status.yml
+ - include_tasks: check_cluster_status.yml
when: not l_etcd_cluster_status_ok | bool
@@ -63,5 +63,5 @@
seconds: 5
when: not l_etcd_cluster_status_ok | bool
- - include: check_cluster_status.yml
+ - include_tasks: check_cluster_status.yml
when: not l_etcd_cluster_status_ok | bool
diff --git a/roles/etcd/tasks/remove_ca_certificates.yml b/roles/etcd/tasks/remove_ca_certificates.yml
index 36df1a1cc..c1ea4e6c9 100644
--- a/roles/etcd/tasks/remove_ca_certificates.yml
+++ b/roles/etcd/tasks/remove_ca_certificates.yml
@@ -1,2 +1,2 @@
---
-- include: certificates/remove_ca_certificates.yml
+- include_tasks: certificates/remove_ca_certificates.yml
diff --git a/roles/etcd/tasks/remove_generated_certificates.yml b/roles/etcd/tasks/remove_generated_certificates.yml
index b10a4b32d..8cdeea187 100644
--- a/roles/etcd/tasks/remove_generated_certificates.yml
+++ b/roles/etcd/tasks/remove_generated_certificates.yml
@@ -1,2 +1,2 @@
---
-- include: certificates/remove_generated_certificates.yml
+- include_tasks: certificates/remove_generated_certificates.yml
diff --git a/roles/etcd/tasks/restart.yml b/roles/etcd/tasks/restart.yml
new file mode 100644
index 000000000..d4a016eec
--- /dev/null
+++ b/roles/etcd/tasks/restart.yml
@@ -0,0 +1,21 @@
+---
+
+- name: restart etcd
+ service:
+ name: "{{ etcd_service }}"
+ state: restarted
+ when:
+ - not g_etcd_certificates_expired | default(false) | bool
+
+- name: stop etcd
+ service:
+ name: "{{ etcd_service }}"
+ state: stopped
+ when:
+ - g_etcd_certificates_expired | default(false) | bool
+- name: start etcd
+ service:
+ name: "{{ etcd_service }}"
+ state: started
+ when:
+ - g_etcd_certificates_expired | default(false) | bool
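The new tasks file restarts etcd in place under normal conditions, but switches to an explicit stop/start cycle when `g_etcd_certificates_expired` is set. It can be driven with `tasks_from`, the same mechanism `system_container.yml` uses below for its proxy include; a minimal sketch:

```
- name: Restart etcd, forcing the stop/start path for expired certificates
  include_role:
    name: etcd
    tasks_from: restart
  vars:
    g_etcd_certificates_expired: true  # normally set by earlier certificate checks
```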
diff --git a/roles/etcd/tasks/retrieve_ca_certificates.yml b/roles/etcd/tasks/retrieve_ca_certificates.yml
index bd6c4ec85..2184e669c 100644
--- a/roles/etcd/tasks/retrieve_ca_certificates.yml
+++ b/roles/etcd/tasks/retrieve_ca_certificates.yml
@@ -1,2 +1,2 @@
---
-- include: certificates/retrieve_ca_certificates.yml
+- include_tasks: certificates/retrieve_ca_certificates.yml
diff --git a/roles/etcd/tasks/server_certificates.yml b/roles/etcd/tasks/server_certificates.yml
index ae26079f9..75c35d59e 100644
--- a/roles/etcd/tasks/server_certificates.yml
+++ b/roles/etcd/tasks/server_certificates.yml
@@ -1,6 +1,6 @@
---
-- include: ca.yml
+- include_tasks: ca.yml
when:
- etcd_ca_setup | default(True) | bool
-- include: certificates/fetch_server_certificates_from_ca.yml
+- include_tasks: certificates/fetch_server_certificates_from_ca.yml
diff --git a/roles/etcd/tasks/system_container.yml b/roles/etcd/tasks/system_container.yml
index f71d9b551..82ac4fc84 100644
--- a/roles/etcd/tasks/system_container.yml
+++ b/roles/etcd/tasks/system_container.yml
@@ -5,7 +5,7 @@
tasks_from: proxy
- name: Pull etcd system container
- command: atomic pull --storage=ostree {{ openshift.etcd.etcd_image }}
+ command: atomic pull --storage=ostree {{ etcd_image }}
register: pull_result
changed_when: "'Pulling layer' in pull_result.stdout"
@@ -57,7 +57,7 @@
- name: Install or Update Etcd system container package
oc_atomic_container:
name: etcd
- image: "{{ openshift.etcd.etcd_image }}"
+ image: "{{ etcd_image }}"
state: latest
values:
- ETCD_DATA_DIR=/var/lib/etcd
diff --git a/roles/etcd/tasks/upgrade_image.yml b/roles/etcd/tasks/upgrade_image.yml
index 9e69027eb..35385cb9a 100644
--- a/roles/etcd/tasks/upgrade_image.yml
+++ b/roles/etcd/tasks/upgrade_image.yml
@@ -1,2 +1,2 @@
---
-- include: upgrade/upgrade_image.yml
+- include_tasks: upgrade/upgrade_image.yml
diff --git a/roles/etcd/tasks/upgrade_rpm.yml b/roles/etcd/tasks/upgrade_rpm.yml
index 29603d2b6..fbd3cd919 100644
--- a/roles/etcd/tasks/upgrade_rpm.yml
+++ b/roles/etcd/tasks/upgrade_rpm.yml
@@ -1,2 +1,2 @@
---
-- include: upgrade/upgrade_rpm.yml
+- include_tasks: upgrade/upgrade_rpm.yml
diff --git a/roles/etcd/tasks/version_detect.yml b/roles/etcd/tasks/version_detect.yml
new file mode 100644
index 000000000..fe1e418d8
--- /dev/null
+++ b/roles/etcd/tasks/version_detect.yml
@@ -0,0 +1,55 @@
+---
+- block:
+ - name: Record RPM based etcd version
+ command: rpm -qa --qf '%{version}' etcd\*
+ args:
+ warn: no
+ register: etcd_rpm_version
+ failed_when: false
+ # AUDIT:changed_when: `false` because we are only inspecting
+ # state, not manipulating anything
+ changed_when: false
+ - debug:
+ msg: "Etcd rpm version {{ etcd_rpm_version.stdout }} detected"
+ when:
+ - not openshift.common.is_containerized | bool
+
+- block:
+ - name: Record containerized etcd version (docker)
+ command: docker exec etcd_container rpm -qa --qf '%{version}' etcd\*
+ register: etcd_container_version_docker
+ failed_when: false
+ # AUDIT:changed_when: `false` because we are only inspecting
+ # state, not manipulating anything
+ changed_when: false
+ when:
+ - not l_is_etcd_system_container | bool
+
+ # Given that a registered variable is set even if the when condition
+ # is false, we need to set etcd_container_version separately
+ - set_fact:
+ etcd_container_version: "{{ etcd_container_version_docker.stdout }}"
+ when:
+ - not l_is_etcd_system_container | bool
+
+ - name: Record containerized etcd version (runc)
+ command: runc exec etcd rpm -qa --qf '%{version}' etcd\*
+ register: etcd_container_version_runc
+ failed_when: false
+ # AUDIT:changed_when: `false` because we are only inspecting
+ # state, not manipulating anything
+ changed_when: false
+ when:
+ - l_is_etcd_system_container | bool
+
+ # Given that a registered variable is set even if the when condition
+ # is false, we need to set etcd_container_version separately
+ - set_fact:
+ etcd_container_version: "{{ etcd_container_version_runc.stdout }}"
+ when:
+ - l_is_etcd_system_container | bool
+
+ - debug:
+ msg: "Etcd containerized version {{ etcd_container_version }} detected"
+ when:
+ - openshift.common.is_containerized | bool
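The repeated `set_fact` guards in this new file work around the standard Ansible pitfall its comments describe: a `register` variable is defined even when its task is skipped, but then it carries no `stdout`. Each runtime branch therefore registers into its own variable and copies the result into `etcd_container_version` under the same condition. Stripped to its essentials (names hypothetical):

```
- name: Probe (may be skipped)
  command: /bin/echo 3.2.9
  register: probe
  changed_when: false
  when: run_probe | bool  # run_probe is hypothetical

# `probe` exists either way, but probe.stdout only exists when the task
# actually ran, so the same condition guards any use of it.
- set_fact:
    detected_version: "{{ probe.stdout }}"
  when: run_probe | bool
```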
diff --git a/roles/etcd/templates/etcd.docker.service b/roles/etcd/templates/etcd.docker.service
index adeca7a91..99ae37319 100644
--- a/roles/etcd/templates/etcd.docker.service
+++ b/roles/etcd/templates/etcd.docker.service
@@ -7,7 +7,7 @@ PartOf={{ openshift.docker.service_name }}.service
[Service]
EnvironmentFile={{ etcd_conf_file }}
ExecStartPre=-/usr/bin/docker rm -f {{ etcd_service }}
-ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v {{ etcd_data_dir }}:{{ etcd_data_dir }}:z -v {{ etcd_conf_dir }}:{{ etcd_conf_dir }}:ro --env-file={{ etcd_conf_file }} --net=host --entrypoint=/usr/bin/etcd {{ openshift.etcd.etcd_image }}
+ExecStart=/usr/bin/docker run --name {{ etcd_service }} --rm -v {{ etcd_data_dir }}:{{ etcd_data_dir }}:z -v {{ etcd_conf_dir }}:{{ etcd_conf_dir }}:ro --env-file={{ etcd_conf_file }} --net=host --entrypoint=/usr/bin/etcd {{ etcd_image }}
ExecStop=/usr/bin/docker stop {{ etcd_service }}
SyslogIdentifier=etcd_container
Restart=always
diff --git a/roles/installer_checkpoint/README.md b/roles/installer_checkpoint/README.md
index f8588c4bf..68c0357b6 100644
--- a/roles/installer_checkpoint/README.md
+++ b/roles/installer_checkpoint/README.md
@@ -64,11 +64,11 @@ phase are stored in the `phase_attributes` variable.
},
'installer_phase_etcd': {
'title': 'etcd Install',
- 'playbook': 'playbooks/byo/openshift-etcd/config.yml'
+ 'playbook': 'playbooks/openshift-etcd/config.yml'
},
'installer_phase_nfs': {
'title': 'NFS Install',
- 'playbook': 'playbooks/byo/openshift-nfs/config.yml'
+ 'playbook': 'playbooks/openshift-nfs/config.yml'
},
#...
}
@@ -160,7 +160,7 @@ Health Check : Complete (0:01:10)
etcd Install : Complete (0:02:58)
Master Install : Complete (0:09:20)
Master Additional Install : In Progress (0:20:04)
- This phase can be restarted by running: playbooks/byo/openshift-master/additional_config.yml
+ This phase can be restarted by running: playbooks/openshift-master/additional_config.yml
```
[set_stats]: http://docs.ansible.com/ansible/latest/set_stats_module.html
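The path updates above track the removal of the `byo/` wrapper playbooks; the phase keys themselves are unchanged. For context, each phase records its status via `set_stats`, which the callback plugin then reads. A rough sketch of what writing one phase record could look like (the field layout is an assumption, not lifted from this commit):

```
- name: Record the etcd install phase as complete (hypothetical field layout)
  set_stats:
    data:
      installer_phase_etcd:
        status: "Complete"
        end: "{{ lookup('pipe', 'date +%Y%m%d%H%M%SZ') }}"
    aggregate: false
```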
diff --git a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
index 556e9127f..57444a2a5 100644
--- a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
+++ b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
@@ -46,31 +46,31 @@ class CallbackModule(CallbackBase):
},
'installer_phase_health': {
'title': 'Health Check',
- 'playbook': 'playbooks/byo/openshift-checks/pre-install.yml'
+ 'playbook': 'playbooks/openshift-checks/pre-install.yml'
},
'installer_phase_etcd': {
'title': 'etcd Install',
- 'playbook': 'playbooks/byo/openshift-etcd/config.yml'
+ 'playbook': 'playbooks/openshift-etcd/config.yml'
},
'installer_phase_nfs': {
'title': 'NFS Install',
- 'playbook': 'playbooks/byo/openshift-nfs/config.yml'
+ 'playbook': 'playbooks/openshift-nfs/config.yml'
},
'installer_phase_loadbalancer': {
'title': 'Load balancer Install',
- 'playbook': 'playbooks/byo/openshift-loadbalancer/config.yml'
+ 'playbook': 'playbooks/openshift-loadbalancer/config.yml'
},
'installer_phase_master': {
'title': 'Master Install',
- 'playbook': 'playbooks/byo/openshift-master/config.yml'
+ 'playbook': 'playbooks/openshift-master/config.yml'
},
'installer_phase_master_additional': {
'title': 'Master Additional Install',
- 'playbook': 'playbooks/byo/openshift-master/additional_config.yml'
+ 'playbook': 'playbooks/openshift-master/additional_config.yml'
},
'installer_phase_node': {
'title': 'Node Install',
- 'playbook': 'playbooks/byo/openshift-node/config.yml'
+ 'playbook': 'playbooks/openshift-node/config.yml'
},
'installer_phase_glusterfs': {
'title': 'GlusterFS Install',
@@ -78,11 +78,11 @@ class CallbackModule(CallbackBase):
},
'installer_phase_hosted': {
'title': 'Hosted Install',
- 'playbook': 'playbooks/byo/openshift-cluster/openshift-hosted.yml'
+ 'playbook': 'playbooks/openshift-hosted/config.yml'
},
'installer_phase_metrics': {
'title': 'Metrics Install',
- 'playbook': 'playbooks/byo/openshift-cluster/openshift-metrics.yml'
+ 'playbook': 'playbooks/openshift-metrics/config.yml'
},
'installer_phase_logging': {
'title': 'Logging Install',
@@ -90,15 +90,15 @@ class CallbackModule(CallbackBase):
},
'installer_phase_prometheus': {
'title': 'Prometheus Install',
- 'playbook': 'playbooks/byo/openshift-cluster/openshift-prometheus.yml'
+ 'playbook': 'playbooks/openshift-prometheus/config.yml'
},
'installer_phase_servicecatalog': {
'title': 'Service Catalog Install',
- 'playbook': 'playbooks/byo/openshift-cluster/service-catalog.yml'
+ 'playbook': 'playbooks/openshift-service-catalog/config.yml'
},
'installer_phase_management': {
'title': 'Management Install',
- 'playbook': 'playbooks/byo/openshift-management/config.yml'
+ 'playbook': 'playbooks/openshift-management/config.yml'
},
}
diff --git a/roles/kuryr/tasks/master.yaml b/roles/kuryr/tasks/master.yaml
index 55ab16f74..1cc6d2375 100644
--- a/roles/kuryr/tasks/master.yaml
+++ b/roles/kuryr/tasks/master.yaml
@@ -1,6 +1,6 @@
---
-- name: Perform OpenShit ServiceAccount config
- include: serviceaccount.yaml
+- name: Perform OpenShift ServiceAccount config
+ include_tasks: serviceaccount.yaml
- name: Create kuryr manifests tempdir
command: mktemp -d
diff --git a/roles/nuage_master/tasks/main.yaml b/roles/nuage_master/tasks/main.yaml
index f3c487132..c264427de 100644
--- a/roles/nuage_master/tasks/main.yaml
+++ b/roles/nuage_master/tasks/main.yaml
@@ -1,7 +1,6 @@
---
- name: setup firewall
- include: firewall.yml
- static: yes
+ import_tasks: firewall.yml
- name: Set the Nuage certificate directory fact for Atomic hosts
set_fact:
@@ -62,7 +61,7 @@
become: yes
file: path={{ nuage_mon_rest_server_logdir }} state=directory
-- include: serviceaccount.yml
+- include_tasks: serviceaccount.yml
- name: Download the certs and keys
become: yes
@@ -82,7 +81,7 @@
- nuage.key
- nuage.kubeconfig
-- include: certificates.yml
+- include_tasks: certificates.yml
- name: Install Nuage VSD user certificate
become: yes
diff --git a/roles/nuage_node/tasks/main.yaml b/roles/nuage_node/tasks/main.yaml
index 9db9dbb6a..c6b7a9b10 100644
--- a/roles/nuage_node/tasks/main.yaml
+++ b/roles/nuage_node/tasks/main.yaml
@@ -31,7 +31,7 @@
- nuage.key
- nuage.kubeconfig
-- include: certificates.yml
+- include_tasks: certificates.yml
- name: Add additional Docker mounts for Nuage for atomic hosts
become: yes
@@ -44,8 +44,7 @@
- restart node
ignore_errors: true
-- include: iptables.yml
+- include_tasks: iptables.yml
- name: setup firewall
- include: firewall.yml
- static: yes
+ import_tasks: firewall.yml
diff --git a/roles/openshift_certificate_expiry/README.md b/roles/openshift_certificate_expiry/README.md
index f19a421cb..48338ca1b 100644
--- a/roles/openshift_certificate_expiry/README.md
+++ b/roles/openshift_certificate_expiry/README.md
@@ -54,7 +54,7 @@ included in this role, or you can [read on below for more examples](#more-exampl
to help you craft your own.
```
-$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml
+$ ansible-playbook -v -i HOSTS playbooks/openshift-checks/certificate_expiry/easy-mode.yaml
```
Using the `easy-mode.yaml` playbook will produce:
@@ -65,7 +65,7 @@ Using the `easy-mode.yaml` playbook will produce:
> **Note:** If you are running from an RPM install use
-> `/usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml`
+> `/usr/share/ansible/openshift-ansible/playbooks/openshift-checks/certificate_expiry/easy-mode.yaml`
> instead
## Run from a container
@@ -80,7 +80,7 @@ There are several [examples](../../examples/README.md) in the `examples` directo
## More Example Playbooks
> **Note:** These Playbooks are available to run directly out of the
-> [/playbooks/byo/openshift-checks/certificate_expiry/](../../playbooks/byo/openshift-checks/certificate_expiry/) directory.
+> [/playbooks/openshift-checks/certificate_expiry/](../../playbooks/openshift-checks/certificate_expiry/) directory.
### Default behavior
@@ -99,14 +99,14 @@ This playbook just invokes the certificate expiration check role with default op
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/default.yaml
+$ ansible-playbook -v -i HOSTS playbooks/openshift-checks/certificate_expiry/default.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/default.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/openshift-checks/certificate_expiry/default.yaml
```
-> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/default.yaml)
+> [View This Playbook](../../playbooks/openshift-checks/certificate_expiry/default.yaml)
### Easy mode
@@ -130,14 +130,14 @@ certificates (healthy or not) are included in the results:
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml
+$ ansible-playbook -v -i HOSTS playbooks/openshift-checks/certificate_expiry/easy-mode.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/openshift-checks/certificate_expiry/easy-mode.yaml
```
-> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml)
+> [View This Playbook](../../playbooks/openshift-checks/certificate_expiry/easy-mode.yaml)
### Easy mode and upload reports to masters
@@ -193,14 +193,14 @@ options via environment variables:
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml
+$ ansible-playbook -v -i HOSTS playbooks/openshift-checks/certificate_expiry/easy-mode-upload.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/openshift-checks/certificate_expiry/easy-mode-upload.yaml
```
-> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml)
+> [View This Playbook](../../playbooks/openshift-checks/certificate_expiry/easy-mode-upload.yaml)
### Generate HTML and JSON artifacts in their default paths
@@ -219,14 +219,14 @@ $ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/by
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml
+$ ansible-playbook -v -i HOSTS playbooks/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml
```
-> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml)
+> [View This Playbook](../../playbooks/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml)
### Generate HTML and JSON reports in a custom path
@@ -250,14 +250,14 @@ This example customizes the report generation path to point to a specific path (
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml
+$ ansible-playbook -v -i HOSTS playbooks/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml
```
-> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml)
+> [View This Playbook](../../playbooks/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml)
### Long warning window
@@ -278,14 +278,14 @@ the module out):
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml
+$ ansible-playbook -v -i HOSTS playbooks/openshift-checks/certificate_expiry/longer_warning_period.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/openshift-checks/certificate_expiry/longer_warning_period.yaml
```
-> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml)
+> [View This Playbook](../../playbooks/openshift-checks/certificate_expiry/longer_warning_period.yaml)
### Long warning window and JSON report
@@ -307,14 +307,14 @@ the module out) and save the results as a JSON file:
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml
+$ ansible-playbook -v -i HOSTS playbooks/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml
```
-> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml)
+> [View This Playbook](../../playbooks/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml)
diff --git a/roles/openshift_cli/defaults/main.yml b/roles/openshift_cli/defaults/main.yml
index ed97d539c..82da0639e 100644
--- a/roles/openshift_cli/defaults/main.yml
+++ b/roles/openshift_cli/defaults/main.yml
@@ -1 +1,6 @@
---
+system_images_registry_dict:
+ openshift-enterprise: "registry.access.redhat.com"
+ origin: "docker.io"
+
+system_images_registry: "{{ system_images_registry_dict[openshift_deployment_type | default('origin')] }}"
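Like `etcd_image` above, `system_images_registry` moves out of fact gathering and into role defaults keyed on deployment type; the identical block is added to `openshift_master/defaults/main.yml` below. A runnable sketch of the lookup:

```
- hosts: localhost
  gather_facts: no
  vars:
    system_images_registry_dict:
      openshift-enterprise: "registry.access.redhat.com"
      origin: "docker.io"
    openshift_deployment_type: openshift-enterprise
  tasks:
    - debug:
        msg: "{{ system_images_registry_dict[openshift_deployment_type | default('origin')] }}"
        # -> registry.access.redhat.com
```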
diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml
index 14d8a3325..06dc5d14b 100644
--- a/roles/openshift_cli/tasks/main.yml
+++ b/roles/openshift_cli/tasks/main.yml
@@ -28,13 +28,13 @@
- block:
- name: Pull CLI Image
command: >
- atomic pull --storage ostree {{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.common.cli_image }}:{{ openshift_image_tag }}
+ atomic pull --storage ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.common.cli_image }}:{{ openshift_image_tag }}
register: pull_result
changed_when: "'Pulling layer' in pull_result.stdout"
- name: Copy client binaries/symlinks out of CLI image for use on the host
openshift_container_binary_sync:
- image: "{{ '' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.common.cli_image }}"
+ image: "{{ '' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.common.cli_image }}"
tag: "{{ openshift_image_tag }}"
backend: "atomic"
when:
diff --git a/roles/openshift_cloud_provider/tasks/main.yml b/roles/openshift_cloud_provider/tasks/main.yml
index ab3055c8b..dff492a69 100644
--- a/roles/openshift_cloud_provider/tasks/main.yml
+++ b/roles/openshift_cloud_provider/tasks/main.yml
@@ -11,11 +11,11 @@
state: directory
when: has_cloudprovider | bool
-- include: openstack.yml
+- include_tasks: openstack.yml
when: cloudprovider_is_openstack | bool
-- include: aws.yml
+- include_tasks: aws.yml
when: cloudprovider_is_aws | bool
-- include: gce.yml
+- include_tasks: gce.yml
when: cloudprovider_is_gce | bool
diff --git a/roles/openshift_etcd_facts/tasks/main.yml b/roles/openshift_etcd_facts/tasks/main.yml
index 22fb39006..ed97d539c 100644
--- a/roles/openshift_etcd_facts/tasks/main.yml
+++ b/roles/openshift_etcd_facts/tasks/main.yml
@@ -1,5 +1 @@
---
-- openshift_facts:
- role: etcd
- local_facts:
- etcd_image: "{{ osm_etcd_image | default(None) }}"
diff --git a/roles/openshift_excluder/tasks/disable.yml b/roles/openshift_excluder/tasks/disable.yml
index 5add25b45..21801b994 100644
--- a/roles/openshift_excluder/tasks/disable.yml
+++ b/roles/openshift_excluder/tasks/disable.yml
@@ -2,11 +2,11 @@
- when: r_openshift_excluder_verify_upgrade
block:
- name: Include verify_upgrade.yml when upgrading
- include: verify_upgrade.yml
+ include_tasks: verify_upgrade.yml
# unexclude the current openshift/origin-excluder if it is installed so it can be updated
- name: Disable excluders before the upgrade to remove older excluding expressions
- include: unexclude.yml
+ include_tasks: unexclude.yml
vars:
# before the docker excluder can be updated, it needs to be disabled
# to remove older excluded packages that are no longer excluded
@@ -15,12 +15,12 @@
# Install any excluder that is enabled
- name: Include install.yml
- include: install.yml
+ include_tasks: install.yml
# And finally adjust an excluder in order to update host components correctly. First
# exclude then unexclude
- name: Include exclude.yml
- include: exclude.yml
+ include_tasks: exclude.yml
vars:
# Enable the docker excluder only if it is overridden
# BZ #1430612: docker excluders should be enabled even during installation and upgrade
@@ -30,7 +30,7 @@
# All excluders that are to be disabled are disabled
- name: Include unexclude.yml
- include: unexclude.yml
+ include_tasks: unexclude.yml
vars:
# If the docker override is not set, default to the generic behaviour
# BZ #1430612: docker excluders should be enabled even during installation and upgrade
diff --git a/roles/openshift_excluder/tasks/enable.yml b/roles/openshift_excluder/tasks/enable.yml
index fce44cfb5..7c3742a06 100644
--- a/roles/openshift_excluder/tasks/enable.yml
+++ b/roles/openshift_excluder/tasks/enable.yml
@@ -1,6 +1,6 @@
---
- name: Install excluders
- include: install.yml
+ include_tasks: install.yml
- name: Enable excluders
- include: exclude.yml
+ include_tasks: exclude.yml
diff --git a/roles/openshift_excluder/tasks/main.yml b/roles/openshift_excluder/tasks/main.yml
index db20b4012..93d6ef149 100644
--- a/roles/openshift_excluder/tasks/main.yml
+++ b/roles/openshift_excluder/tasks/main.yml
@@ -32,7 +32,7 @@
- r_openshift_excluder_upgrade_target is not defined
- name: Include main action task file
- include: "{{ r_openshift_excluder_action }}.yml"
+ include_tasks: "{{ r_openshift_excluder_action }}.yml"
when:
- not ostree_booted.stat.exists | bool
diff --git a/roles/openshift_excluder/tasks/verify_upgrade.yml b/roles/openshift_excluder/tasks/verify_upgrade.yml
index 42026664a..b55a9af23 100644
--- a/roles/openshift_excluder/tasks/verify_upgrade.yml
+++ b/roles/openshift_excluder/tasks/verify_upgrade.yml
@@ -1,12 +1,12 @@
---
- name: Verify Docker Excluder version
- include: verify_excluder.yml
+ include_tasks: verify_excluder.yml
vars:
excluder: "{{ r_openshift_excluder_service_type }}-docker-excluder"
when: r_openshift_excluder_enable_docker_excluder | bool
- name: Verify OpenShift Excluder version
- include: verify_excluder.yml
+ include_tasks: verify_excluder.yml
vars:
excluder: "{{ r_openshift_excluder_service_type }}-excluder"
when: r_openshift_excluder_enable_openshift_excluder | bool
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 99ebb7e36..f94e0e097 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -1563,7 +1563,8 @@ def set_builddefaults_facts(facts):
# Scaffold out the full expected datastructure
facts['master']['admission_plugin_config'] = {'BuildDefaults': {'configuration': {'env': {}}}}
facts['master']['admission_plugin_config'].update(builddefaults['config'])
- delete_empty_keys(facts['master']['admission_plugin_config']['BuildDefaults']['configuration']['env'])
+ if 'env' in facts['master']['admission_plugin_config']['BuildDefaults']['configuration']:
+ delete_empty_keys(facts['master']['admission_plugin_config']['BuildDefaults']['configuration']['env'])
return facts
@@ -1630,7 +1631,6 @@ def set_container_facts_if_unset(facts):
cli_image = master_image
node_image = 'openshift3/node'
ovs_image = 'openshift3/openvswitch'
- etcd_image = 'registry.access.redhat.com/rhel7/etcd'
pod_image = 'openshift3/ose-pod'
router_image = 'openshift3/ose-haproxy-router'
registry_image = 'openshift3/ose-docker-registry'
@@ -1640,7 +1640,6 @@ def set_container_facts_if_unset(facts):
cli_image = master_image
node_image = 'openshift/node'
ovs_image = 'openshift/openvswitch'
- etcd_image = 'registry.access.redhat.com/rhel7/etcd'
pod_image = 'openshift/origin-pod'
router_image = 'openshift/origin-haproxy-router'
registry_image = 'openshift/origin-docker-registry'
@@ -1667,8 +1666,6 @@ def set_container_facts_if_unset(facts):
facts['common']['registry_image'] = registry_image
if 'deployer_image' not in facts['common']:
facts['common']['deployer_image'] = deployer_image
- if 'etcd' in facts and 'etcd_image' not in facts['etcd']:
- facts['etcd']['etcd_image'] = etcd_image
if 'master' in facts and 'master_image' not in facts['master']:
facts['master']['master_image'] = master_image
facts['master']['master_system_image'] = master_image
diff --git a/roles/openshift_hosted/tasks/storage/glusterfs.yml b/roles/openshift_hosted/tasks/storage/glusterfs.yml
index c2954fde1..9b998142a 100644
--- a/roles/openshift_hosted/tasks/storage/glusterfs.yml
+++ b/roles/openshift_hosted/tasks/storage/glusterfs.yml
@@ -79,14 +79,7 @@
- REGISTRY_STORAGE_MAINTENANCE_READONLY_ENABLED: 'true'
when: openshift.hosted.registry.storage.glusterfs.swap
-- name: Unmount registry volume
+- name: Unmount registry volume and clean up mount point/fstab
mount:
- state: unmounted
- name: "{{ mktemp.stdout }}"
-
-- name: Delete temp mount directory
- file:
- dest: "{{ mktemp.stdout }}"
state: absent
- changed_when: False
- check_mode: no
+ name: "{{ mktemp.stdout }}"
diff --git a/roles/openshift_loadbalancer/tasks/main.yml b/roles/openshift_loadbalancer/tasks/main.yml
index 69b061fc5..c87a327a4 100644
--- a/roles/openshift_loadbalancer/tasks/main.yml
+++ b/roles/openshift_loadbalancer/tasks/main.yml
@@ -1,7 +1,6 @@
---
- name: setup firewall
- include: firewall.yml
- static: yes
+ import_tasks: firewall.yml
- name: Install haproxy
package: name=haproxy state=present
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
index 2bd02af60..770892d52 100644
--- a/roles/openshift_logging_elasticsearch/tasks/main.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -270,7 +270,7 @@
port: 443
targetPort: 4443
selector:
- component: "{{ es_component }}-prometheus"
+ component: "{{ es_component }}"
provider: openshift
- oc_edit:
diff --git a/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 b/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2
index 5a4f7f762..8529b61d5 100644
--- a/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2
+++ b/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2
@@ -42,7 +42,7 @@ objects:
component: eventrouter
logging-infra: eventrouter
provider: openshift
- replicas: ${REPLICAS}
+ replicas: "${{ '{{' }}REPLICAS{{ '}}' }}"
template:
metadata:
labels:
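
The doubled-brace escaping above makes Jinja emit literal `${{REPLICAS}}` into the rendered template, which OpenShift's template processor substitutes as a non-string value (plain `${REPLICAS}` would only ever yield a string, which is invalid for `replicas`). A sketch of the three stages:

```
# In the .j2 source:
#   replicas: "${{ '{{' }}REPLICAS{{ '}}' }}"
# After Jinja rendering (what the template object contains):
#   replicas: "${{REPLICAS}}"
# After `oc process` with REPLICAS=1:
#   replicas: 1
```
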
diff --git a/roles/openshift_logging_fluentd/files/secure-forward.conf b/roles/openshift_logging_fluentd/files/secure-forward.conf
index f4483df79..87410c1c5 100644
--- a/roles/openshift_logging_fluentd/files/secure-forward.conf
+++ b/roles/openshift_logging_fluentd/files/secure-forward.conf
@@ -1,3 +1,4 @@
+# <store>
# @type secure_forward
# self_hostname ${HOSTNAME}
@@ -22,3 +23,4 @@
# specify hostlabel for FQDN verification if ipaddress is used for host
# hostlabel server.fqdn.example.com
# </server>
+# </store>
diff --git a/roles/openshift_logging_mux/files/secure-forward.conf b/roles/openshift_logging_mux/files/secure-forward.conf
index f4483df79..87410c1c5 100644
--- a/roles/openshift_logging_mux/files/secure-forward.conf
+++ b/roles/openshift_logging_mux/files/secure-forward.conf
@@ -1,3 +1,4 @@
+# <store>
# @type secure_forward
# self_hostname ${HOSTNAME}
@@ -22,3 +23,4 @@
# specify hostlabel for FQDN verification if ipaddress is used for host
# hostlabel server.fqdn.example.com
# </server>
+# </store>
diff --git a/roles/openshift_logging_mux/templates/mux.j2 b/roles/openshift_logging_mux/templates/mux.j2
index cfb13d59b..79e449b73 100644
--- a/roles/openshift_logging_mux/templates/mux.j2
+++ b/roles/openshift_logging_mux/templates/mux.j2
@@ -59,7 +59,7 @@ spec:
{% endif %}
{% endif %}
ports:
- - containerPort: "{{ openshift_logging_mux_port }}"
+ - containerPort: {{ openshift_logging_mux_port }}
name: mux-forward
volumeMounts:
- name: config
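
The quotes are dropped here (as on `mtu:` in node-config.yaml.j2 later in this patch) because the Kubernetes API expects these fields as integers; a quoted Jinja expression renders as a YAML string and fails validation. Rendered result, assuming the role default `openshift_logging_mux_port: 24284`:

```
ports:
- containerPort: 24284   # integer, not the string "24284"
  name: mux-forward
```
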
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index 3fb94fff8..99bc12514 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -7,6 +7,12 @@ openshift_master_debug_level: "{{ debug_level | default(2) }}"
r_openshift_master_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
r_openshift_master_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
+system_images_registry_dict:
+ openshift-enterprise: "registry.access.redhat.com"
+ origin: "docker.io"
+
+system_images_registry: "{{ system_images_registry_dict[openshift_deployment_type | default('origin')] }}"
+
openshift_node_ips: []
r_openshift_master_clean_install: false
r_openshift_master_etcd3_storage: false
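
These defaults replace the old `openshift.common.system_images_registry` fact with a plain dictionary lookup keyed on the deployment type. A worked example (image name and tag are illustrative) of how the `atomic pull` tasks later in this patch resolve:

```
# openshift_deployment_type: origin               -> system_images_registry: docker.io
# openshift_deployment_type: openshift-enterprise -> system_images_registry: registry.access.redhat.com
#
# Rendered pull command for an origin install (illustrative image/tag):
#   atomic pull --storage=ostree docker.io/openshift/node:v3.7.0
```
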
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index b6d3539b1..e2f92d597 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -31,8 +31,7 @@
- openshift.common.is_containerized | bool
- name: Open up firewall ports
- include: firewall.yml
- static: yes
+ import_tasks: firewall.yml
- name: Install Master package
package:
@@ -172,13 +171,13 @@
no_proxy_etcd_host_ips: "{{ openshift_no_proxy_etcd_host_ips }}"
- name: Update journald config
- include: journald.yml
+ include_tasks: journald.yml
- name: Install the systemd units
- include: systemd_units.yml
+ include_tasks: systemd_units.yml
- name: Install Master system container
- include: system_container.yml
+ include_tasks: system_container.yml
when:
- openshift.common.is_containerized | bool
- openshift.common.is_master_system_container | bool
@@ -212,10 +211,10 @@
- restart master api
- restart master controllers
-- include: bootstrap_settings.yml
+- include_tasks: bootstrap_settings.yml
when: openshift_master_bootstrap_enabled | default(False)
-- include: set_loopback_context.yml
+- include_tasks: set_loopback_context.yml
- name: Start and enable master api on first master
systemd:
@@ -273,7 +272,7 @@
# A separate wait is required here for native HA since notifies will
# be resolved after all tasks in the role.
-- include: check_master_api_is_ready.yml
+- include_tasks: check_master_api_is_ready.yml
when:
- openshift.master.cluster_method == 'native'
- master_api_service_status_changed | bool
@@ -323,5 +322,5 @@
- l_install_result | changed
- name: node bootstrap settings
- include: bootstrap.yml
+ include_tasks: bootstrap.yml
when: openshift_master_bootstrap_enabled | default(False)
diff --git a/roles/openshift_master/tasks/system_container.yml b/roles/openshift_master/tasks/system_container.yml
index 843352532..23386f11b 100644
--- a/roles/openshift_master/tasks/system_container.yml
+++ b/roles/openshift_master/tasks/system_container.yml
@@ -6,7 +6,7 @@
- name: Pre-pull master system container image
command: >
- atomic pull --storage=ostree {{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}
+ atomic pull --storage=ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}
register: l_pull_result
changed_when: "'Pulling layer' in l_pull_result.stdout"
@@ -18,7 +18,7 @@
- name: Install or Update HA api master system container
oc_atomic_container:
name: "{{ openshift.common.service_type }}-master-api"
- image: "{{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}"
+ image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}"
state: latest
values:
- COMMAND=api
@@ -26,7 +26,7 @@
- name: Install or Update HA controller master system container
oc_atomic_container:
name: "{{ openshift.common.service_type }}-master-controllers"
- image: "{{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}"
+ image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}"
state: latest
values:
- COMMAND=controllers
diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml
index b0fa72f19..582185198 100644
--- a/roles/openshift_master/tasks/systemd_units.yml
+++ b/roles/openshift_master/tasks/systemd_units.yml
@@ -9,7 +9,7 @@
when:
- openshift.common.is_containerized | bool
-- include: registry_auth.yml
+- include_tasks: registry_auth.yml
- name: Disable the legacy master service if it exists
systemd:
diff --git a/roles/openshift_master/tasks/upgrade.yml b/roles/openshift_master/tasks/upgrade.yml
index 92371921d..f84cf2f6e 100644
--- a/roles/openshift_master/tasks/upgrade.yml
+++ b/roles/openshift_master/tasks/upgrade.yml
@@ -1,16 +1,16 @@
---
-- include: upgrade/rpm_upgrade.yml
+- include_tasks: upgrade/rpm_upgrade.yml
when: not openshift.common.is_containerized | bool
-- include: upgrade/upgrade_scheduler.yml
+- include_tasks: upgrade/upgrade_scheduler.yml
# master_config_hook is passed in from upgrade play.
-- include: "upgrade/{{ master_config_hook }}"
+- include_tasks: "upgrade/{{ master_config_hook }}"
when: master_config_hook is defined
-- include: journald.yml
+- include_tasks: journald.yml
-- include: systemd_units.yml
+- include_tasks: systemd_units.yml
- name: Check for ca-bundle.crt
stat:
diff --git a/roles/openshift_master_cluster/tasks/main.yml b/roles/openshift_master_cluster/tasks/main.yml
index 40705d357..41bfc72cb 100644
--- a/roles/openshift_master_cluster/tasks/main.yml
+++ b/roles/openshift_master_cluster/tasks/main.yml
@@ -10,5 +10,5 @@
failed_when: false
when: openshift.master.cluster_method == "pacemaker"
-- include: configure.yml
+- include_tasks: configure.yml
when: "pcs_status | failed and 'Error: cluster is not currently running on this node' in pcs_status.stderr"
diff --git a/roles/openshift_metrics/tasks/generate_certificates.yaml b/roles/openshift_metrics/tasks/generate_certificates.yaml
index 3dc15d58b..bb842d710 100644
--- a/roles/openshift_metrics/tasks/generate_certificates.yaml
+++ b/roles/openshift_metrics/tasks/generate_certificates.yaml
@@ -8,4 +8,4 @@
--serial='{{ mktemp.stdout }}/ca.serial.txt'
--name="metrics-signer@{{lookup('pipe','date +%s')}}"
-- include: generate_hawkular_certificates.yaml
+- include_tasks: generate_hawkular_certificates.yaml
diff --git a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
index 31129a6ac..0fd19c9f8 100644
--- a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
+++ b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml
@@ -1,13 +1,13 @@
---
- name: generate hawkular-metrics certificates
- include: setup_certificate.yaml
+ include_tasks: setup_certificate.yaml
vars:
component: hawkular-metrics
hostnames: "hawkular-metrics,hawkular-metrics.{{ openshift_metrics_project }}.svc.cluster.local,{{ openshift_metrics_hawkular_hostname }}"
changed_when: no
- name: generate hawkular-cassandra certificates
- include: setup_certificate.yaml
+ include_tasks: setup_certificate.yaml
vars:
component: hawkular-cassandra
hostnames: hawkular-cassandra
diff --git a/roles/openshift_metrics/tasks/install_heapster.yaml b/roles/openshift_metrics/tasks/install_heapster.yaml
index 0eb852d91..a33b28ba7 100644
--- a/roles/openshift_metrics/tasks/install_heapster.yaml
+++ b/roles/openshift_metrics/tasks/install_heapster.yaml
@@ -66,4 +66,4 @@
namespace: "{{ openshift_metrics_project }}"
changed_when: no
-- include: generate_heapster_secrets.yaml
+- include_tasks: generate_heapster_secrets.yaml
diff --git a/roles/openshift_metrics/tasks/install_metrics.yaml b/roles/openshift_metrics/tasks/install_metrics.yaml
index fdf4ae57f..49d1d8cf1 100644
--- a/roles/openshift_metrics/tasks/install_metrics.yaml
+++ b/roles/openshift_metrics/tasks/install_metrics.yaml
@@ -1,8 +1,8 @@
---
-- include: pre_install.yaml
+- include_tasks: pre_install.yaml
- name: Install Metrics
- include: "{{ role_path }}/tasks/install_{{ include_file }}.yaml"
+ include_tasks: "install_{{ include_file }}.yaml"
with_items:
- support
- heapster
@@ -13,11 +13,11 @@
when: not openshift_metrics_heapster_standalone | bool
- name: Install Heapster Standalone
- include: install_heapster.yaml
+ include_tasks: install_heapster.yaml
when: openshift_metrics_heapster_standalone | bool
- name: Install Hawkular OpenShift Agent (HOSA)
- include: install_hosa.yaml
+ include_tasks: install_hosa.yaml
when: openshift_metrics_install_hawkular_agent | default(false) | bool
- find:
@@ -34,7 +34,7 @@
changed_when: no
- name: Create objects
- include: oc_apply.yaml
+ include_tasks: oc_apply.yaml
vars:
kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
namespace: "{{ openshift_metrics_project }}"
@@ -58,7 +58,7 @@
changed_when: no
- name: Create Hawkular Agent objects
- include: oc_apply.yaml
+ include_tasks: oc_apply.yaml
vars:
kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
namespace: "{{ openshift_metrics_hawkular_agent_namespace }}"
@@ -67,7 +67,7 @@
with_items: "{{ hawkular_agent_object_defs.results }}"
when: openshift_metrics_install_hawkular_agent | bool
-- include: update_master_config.yaml
+- include_tasks: update_master_config.yaml
- command: >
{{openshift.common.client_binary}}
@@ -80,11 +80,11 @@
changed_when: no
- name: Scaling down cluster to recognize changes
- include: stop_metrics.yaml
+ include_tasks: stop_metrics.yaml
when: existing_metrics_rc.stdout_lines | length > 0
- name: Scaling up cluster
- include: start_metrics.yaml
+ include_tasks: start_metrics.yaml
tags: openshift_metrics_start_cluster
when:
- openshift_metrics_start_cluster | default(true) | bool
diff --git a/roles/openshift_metrics/tasks/install_support.yaml b/roles/openshift_metrics/tasks/install_support.yaml
index 584e3be05..c3727d530 100644
--- a/roles/openshift_metrics/tasks/install_support.yaml
+++ b/roles/openshift_metrics/tasks/install_support.yaml
@@ -19,7 +19,7 @@
- fail: msg="'keytool' is unavailable. Please install java-1.8.0-openjdk-headless on the control node"
when: keytool_check.rc == 1
-- include: generate_certificates.yaml
-- include: generate_serviceaccounts.yaml
-- include: generate_services.yaml
-- include: generate_rolebindings.yaml
+- include_tasks: generate_certificates.yaml
+- include_tasks: generate_serviceaccounts.yaml
+- include_tasks: generate_services.yaml
+- include_tasks: generate_rolebindings.yaml
diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml
index 10509fc1e..9dfe360bb 100644
--- a/roles/openshift_metrics/tasks/main.yaml
+++ b/roles/openshift_metrics/tasks/main.yaml
@@ -43,15 +43,15 @@
check_mode: no
tags: metrics_init
-- include: install_metrics.yaml
+- include_tasks: install_metrics.yaml
when:
- openshift_metrics_install_metrics | bool
-- include: uninstall_metrics.yaml
+- include_tasks: uninstall_metrics.yaml
when:
- not openshift_metrics_install_metrics | bool
-- include: uninstall_hosa.yaml
+- include_tasks: uninstall_hosa.yaml
when: not openshift_metrics_install_hawkular_agent | bool
- name: Delete temp directory
diff --git a/roles/openshift_metrics/tasks/uninstall_metrics.yaml b/roles/openshift_metrics/tasks/uninstall_metrics.yaml
index 403b1252c..1265c7bfd 100644
--- a/roles/openshift_metrics/tasks/uninstall_metrics.yaml
+++ b/roles/openshift_metrics/tasks/uninstall_metrics.yaml
@@ -1,6 +1,6 @@
---
- name: stop metrics
- include: stop_metrics.yaml
+ include_tasks: stop_metrics.yaml
- name: remove metrics components
command: >
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index 89d154ad7..62208c155 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -4,9 +4,18 @@ openshift_node_debug_level: "{{ debug_level | default(2) }}"
r_openshift_node_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
r_openshift_node_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
+l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+
openshift_deployment_type: "{{ openshift_deployment_type | default('origin') }}"
openshift_service_type: "{{ 'origin' if openshift_deployment_type == 'origin' else 'atomic-openshift' }}"
+system_images_registry_dict:
+ openshift-enterprise: "registry.access.redhat.com"
+ origin: "docker.io"
+
+system_images_registry: "{{ system_images_registry_dict[openshift_deployment_type | default('origin')] }}"
+l_is_openvswitch_system_container: "{{ (openshift_use_openvswitch_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"
+
openshift_image_tag: ''
default_r_openshift_node_image_prep_packages:
diff --git a/roles/openshift_node/tasks/bootstrap.yml b/roles/openshift_node/tasks/bootstrap.yml
index b8be50f6c..ac43ef039 100644
--- a/roles/openshift_node/tasks/bootstrap.yml
+++ b/roles/openshift_node/tasks/bootstrap.yml
@@ -32,8 +32,7 @@
regexp: "^CONFIG_FILE=.*"
- name: include aws sysconfig credentials
- include: aws.yml
- static: yes
+ import_tasks: aws.yml
when: not (openshift_node_use_instance_profiles | default(False))
#- name: update the ExecStart to have bootstrap
diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml
index 2fea33454..741a2234f 100644
--- a/roles/openshift_node/tasks/config.yml
+++ b/roles/openshift_node/tasks/config.yml
@@ -1,6 +1,6 @@
---
- name: Install the systemd units
- include: systemd_units.yml
+ include_tasks: systemd_units.yml
- name: Start and enable openvswitch service
systemd:
@@ -47,8 +47,7 @@
- restart node
- name: include aws provider credentials
- include: aws.yml
- static: yes
+ import_tasks: aws.yml
when: not (openshift_node_use_instance_profiles | default(False))
# Necessary because when you're on a node that's also a master the master will be
diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml
index 6b7e40491..9a91e2fb6 100644
--- a/roles/openshift_node/tasks/install.yml
+++ b/roles/openshift_node/tasks/install.yml
@@ -20,7 +20,7 @@
- when:
- openshift.common.is_containerized | bool
- - not openshift.common.is_node_system_container | bool
+ - not l_is_node_system_container | bool
block:
- name: Pre-pull node image when containerized
command: >
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index eae9ca7bc..8e9d1d1b5 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -7,8 +7,7 @@
- not openshift_use_crio | default(false)
- name: setup firewall
- include: firewall.yml
- static: yes
+ import_tasks: firewall.yml
#### Disable SWAP #####
# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory
@@ -41,7 +40,7 @@
#### End Disable Swap Block ####
- name: include node installer
- include: install.yml
+ include_tasks: install.yml
- name: Restart cri-o
systemd:
@@ -66,34 +65,34 @@
sysctl_file: "/etc/sysctl.d/99-openshift.conf"
reload: yes
-- include: registry_auth.yml
+- include_tasks: registry_auth.yml
- name: include standard node config
- include: config.yml
+ include_tasks: config.yml
#### Storage class plugins here ####
- name: NFS storage plugin configuration
- include: storage_plugins/nfs.yml
+ include_tasks: storage_plugins/nfs.yml
tags:
- nfs
- name: GlusterFS storage plugin configuration
- include: storage_plugins/glusterfs.yml
+ include_tasks: storage_plugins/glusterfs.yml
when: "'glusterfs' in openshift.node.storage_plugin_deps"
- name: Ceph storage plugin configuration
- include: storage_plugins/ceph.yml
+ include_tasks: storage_plugins/ceph.yml
when: "'ceph' in openshift.node.storage_plugin_deps"
- name: iSCSI storage plugin configuration
- include: storage_plugins/iscsi.yml
+ include_tasks: storage_plugins/iscsi.yml
when: "'iscsi' in openshift.node.storage_plugin_deps"
##### END Storage #####
-- include: config/workaround-bz1331590-ovs-oom-fix.yml
+- include_tasks: config/workaround-bz1331590-ovs-oom-fix.yml
when: openshift_node_use_openshift_sdn | default(true) | bool
- name: include bootstrap node config
- include: bootstrap.yml
+ include_tasks: bootstrap.yml
when: openshift_node_bootstrap
diff --git a/roles/openshift_node/tasks/node_system_container.yml b/roles/openshift_node/tasks/node_system_container.yml
index 164a79b39..73dc9e130 100644
--- a/roles/openshift_node/tasks/node_system_container.yml
+++ b/roles/openshift_node/tasks/node_system_container.yml
@@ -6,14 +6,14 @@
- name: Pre-pull node system container image
command: >
- atomic pull --storage=ostree {{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}
+ atomic pull --storage=ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}
register: pull_result
changed_when: "'Pulling layer' in pull_result.stdout"
- name: Install or Update node system container
oc_atomic_container:
name: "{{ openshift.common.service_type }}-node"
- image: "{{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}"
+ image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}"
values:
- "DNS_DOMAIN={{ openshift.common.dns_domain }}"
- "DOCKER_SERVICE={{ openshift.docker.service_name }}.service"
diff --git a/roles/openshift_node/tasks/openvswitch_system_container.yml b/roles/openshift_node/tasks/openvswitch_system_container.yml
index 0f73ce454..8c3548475 100644
--- a/roles/openshift_node/tasks/openvswitch_system_container.yml
+++ b/roles/openshift_node/tasks/openvswitch_system_container.yml
@@ -17,14 +17,14 @@
- name: Pre-pull OpenVSwitch system container image
command: >
- atomic pull --storage=ostree {{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }}
+ atomic pull --storage=ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }}
register: pull_result
changed_when: "'Pulling layer' in pull_result.stdout"
- name: Install or Update OpenVSwitch system container
oc_atomic_container:
name: openvswitch
- image: "{{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }}"
+ image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }}"
state: latest
values:
- "DOCKER_SERVICE={{ l_service_name }}"
diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml
index 9c182ade6..397e1ba18 100644
--- a/roles/openshift_node/tasks/systemd_units.yml
+++ b/roles/openshift_node/tasks/systemd_units.yml
@@ -3,7 +3,7 @@
template:
dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
src: "{{ openshift.common.is_containerized | bool | ternary('openshift.docker.node.service', 'node.service.j2') }}"
- when: not openshift.common.is_node_system_container | bool
+ when: not l_is_node_system_container | bool
notify:
- reload systemd units
- restart node
@@ -11,21 +11,21 @@
- when: openshift.common.is_containerized | bool
block:
- name: include node deps docker service file
- include: config/install-node-deps-docker-service-file.yml
+ include_tasks: config/install-node-deps-docker-service-file.yml
- name: include ovs service environment file
- include: config/install-ovs-service-env-file.yml
+ include_tasks: config/install-ovs-service-env-file.yml
- name: Install Node system container
- include: node_system_container.yml
+ include_tasks: node_system_container.yml
when:
- - openshift.common.is_node_system_container | bool
+ - l_is_node_system_container | bool
- name: Install OpenvSwitch system containers
- include: openvswitch_system_container.yml
+ include_tasks: openvswitch_system_container.yml
when:
- openshift_node_use_openshift_sdn | bool
- - openshift.common.is_openvswitch_system_container | bool
+ - l_is_openvswitch_system_container | bool
- block:
- name: Pre-pull openvswitch image
@@ -34,11 +34,11 @@
register: pull_result
changed_when: "'Downloaded newer image' in pull_result.stdout"
- - include: config/install-ovs-docker-service-file.yml
+ - include_tasks: config/install-ovs-docker-service-file.yml
when:
- openshift.common.is_containerized | bool
- openshift_node_use_openshift_sdn | bool
- - not openshift.common.is_openvswitch_system_container | bool
+ - not l_is_openvswitch_system_container | bool
-- include: config/configure-node-settings.yml
-- include: config/configure-proxy-settings.yml
+- include_tasks: config/configure-node-settings.yml
+- include_tasks: config/configure-proxy-settings.yml
diff --git a/roles/openshift_node/tasks/upgrade.yml b/roles/openshift_node/tasks/upgrade.yml
index 2bca1e974..fb21b39a1 100644
--- a/roles/openshift_node/tasks/upgrade.yml
+++ b/roles/openshift_node/tasks/upgrade.yml
@@ -10,7 +10,7 @@
# tasks file for openshift_node_upgrade
-- include: registry_auth.yml
+- include_tasks: registry_auth.yml
- name: Stop node and openvswitch services
service:
@@ -48,7 +48,7 @@
- openshift.common.is_containerized | bool
- openshift_use_openshift_sdn | bool
-- include: docker/upgrade.yml
+- include_tasks: docker/upgrade.yml
vars:
# We will restart Docker ourselves after everything is ready:
skip_docker_restart: True
@@ -56,10 +56,10 @@
- l_docker_upgrade is defined
- l_docker_upgrade | bool
-- include: "{{ node_config_hook }}"
+- include_tasks: "{{ node_config_hook }}"
when: node_config_hook is defined
-- include: upgrade/rpm_upgrade.yml
+- include_tasks: upgrade/rpm_upgrade.yml
vars:
component: "node"
openshift_version: "{{ openshift_pkg_version | default('') }}"
@@ -70,7 +70,7 @@
path: "/etc/systemd/system/docker.service.d/docker-sdn-ovs.conf"
state: absent
-- include: upgrade/containerized_node_upgrade.yml
+- include_tasks: upgrade/containerized_node_upgrade.yml
when: openshift.common.is_containerized | bool
- name: Ensure containerized services stopped before Docker restart
@@ -165,7 +165,7 @@
value: "/etc/origin/node/resolv.conf"
# Restart all services
-- include: upgrade/restart.yml
+- include_tasks: upgrade/restart.yml
- name: Wait for node to be ready
oc_obj:
diff --git a/roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml b/roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml
index 96b94d8b6..245de60a7 100644
--- a/roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml
+++ b/roles/openshift_node/tasks/upgrade/containerized_node_upgrade.yml
@@ -6,7 +6,7 @@
skip_node_svc_handlers: True
- name: Update systemd units
- include: ../systemd_units.yml
+ include_tasks: ../systemd_units.yml
# This is a no-op because of skip_node_svc_handlers, but lets us trigger it before end of
# play when the node has already been marked schedulable again. (this would look strange
diff --git a/roles/openshift_node_dnsmasq/tasks/main.yml b/roles/openshift_node_dnsmasq/tasks/main.yml
index 9bbaafc29..a33b78780 100644
--- a/roles/openshift_node_dnsmasq/tasks/main.yml
+++ b/roles/openshift_node_dnsmasq/tasks/main.yml
@@ -59,9 +59,9 @@
state: started
# Dynamic NetworkManager based dispatcher
-- include: ./network-manager.yml
+- include_tasks: ./network-manager.yml
when: network_manager_active | bool
# Relies on ansible in order to configure static config
-- include: ./no-network-manager.yml
+- include_tasks: ./no-network-manager.yml
when: not network_manager_active | bool
diff --git a/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml b/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml
index 8a7da66c2..dede2fb8f 100644
--- a/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml
+++ b/roles/openshift_node_dnsmasq/tasks/no-network-manager.yml
@@ -8,4 +8,4 @@
state: present
notify: restart NetworkManager
-- include: ./network-manager.yml
+- include_tasks: network-manager.yml
diff --git a/roles/openshift_node_group/defaults/main.yml b/roles/openshift_node_group/defaults/main.yml
index d398a7fdc..7c81409a5 100644
--- a/roles/openshift_node_group/defaults/main.yml
+++ b/roles/openshift_node_group/defaults/main.yml
@@ -23,4 +23,4 @@ openshift_node_group_network_plugin_default: "{{ os_sdn_network_plugin_name | de
openshift_node_group_network_plugin: "{{ openshift_node_group_network_plugin_default }}"
openshift_node_group_node_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}"
openshift_node_group_node_data_dir: "{{ openshift_node_group_node_data_dir_default }}"
-openshift_node_group_network_mtu: "{{ openshift_node_sdn_mtu | default(8951) }}"
+openshift_node_group_network_mtu: "{{ openshift_node_sdn_mtu | default(8951) | int }}"
diff --git a/roles/openshift_node_group/tasks/main.yml b/roles/openshift_node_group/tasks/main.yml
index c7c15683d..43ecf1b8b 100644
--- a/roles/openshift_node_group/tasks/main.yml
+++ b/roles/openshift_node_group/tasks/main.yml
@@ -1,6 +1,6 @@
---
- name: Build node config maps
- include: create_config.yml
+ include_tasks: create_config.yml
vars:
openshift_node_group_name: "{{ node_group.name }}"
openshift_node_group_edits: "{{ node_group.edits | default([]) }}"
diff --git a/roles/openshift_node_group/templates/node-config.yaml.j2 b/roles/openshift_node_group/templates/node-config.yaml.j2
index 5e22dc6d2..3fd16247c 100644
--- a/roles/openshift_node_group/templates/node-config.yaml.j2
+++ b/roles/openshift_node_group/templates/node-config.yaml.j2
@@ -33,7 +33,7 @@ masterClientConnectionOverrides:
qps: 20
masterKubeConfig: node.kubeconfig
networkConfig:
- mtu: "{{ openshift_node_group_network_mtu }}"
+ mtu: {{ openshift_node_group_network_mtu }}
networkPluginName: {{ openshift_node_group_network_plugin }}
nodeIP: ""
podManifestConfig: null
diff --git a/roles/openshift_prometheus/README.md b/roles/openshift_prometheus/README.md
index 92f74928c..5bf6e7d77 100644
--- a/roles/openshift_prometheus/README.md
+++ b/roles/openshift_prometheus/README.md
@@ -23,6 +23,13 @@ For default values, see [`defaults/main.yaml`](defaults/main.yaml).
- `openshift_prometheus_<COMPONENT>_image_version`: specify image version for the component
+- `openshift_prometheus_args`: Modify or add command-line arguments for the Prometheus application
+
+e.g.:
+```
+openshift_prometheus_args=['--storage.tsdb.retention=6h', '--storage.tsdb.min-block-duration=5s', '--storage.tsdb.max-block-duration=6m']
+```
+
## PVC related variables
Each prometheus component (prometheus, alertmanager, alertbuffer) can set a PV claim by setting the corresponding role variable:
```
diff --git a/roles/openshift_prometheus/defaults/main.yaml b/roles/openshift_prometheus/defaults/main.yaml
index 4e2cea0b9..1b4a12cac 100644
--- a/roles/openshift_prometheus/defaults/main.yaml
+++ b/roles/openshift_prometheus/defaults/main.yaml
@@ -9,6 +9,9 @@ openshift_prometheus_node_selector: {"region":"infra"}
# additional prometheus rules file
openshift_prometheus_additional_rules_file: null
+# Prometheus application arguments
+openshift_prometheus_args: ['--storage.tsdb.retention=6h', '--storage.tsdb.min-block-duration=2m']
+
# storage
# One of ['emptydir', 'pvc']
openshift_prometheus_storage_type: "emptydir"
diff --git a/roles/openshift_prometheus/tasks/install_prometheus.yaml b/roles/openshift_prometheus/tasks/install_prometheus.yaml
index 21da4bc9d..50736a9ee 100644
--- a/roles/openshift_prometheus/tasks/install_prometheus.yaml
+++ b/roles/openshift_prometheus/tasks/install_prometheus.yaml
@@ -3,7 +3,7 @@
# namespace
- name: Add prometheus project
oc_project:
- state: "{{ state }}"
+ state: present
name: "{{ openshift_prometheus_namespace }}"
node_selector: "{{ openshift_prometheus_node_selector | oo_selector_to_string_list() }}"
description: Prometheus
@@ -11,7 +11,7 @@
# secrets
- name: Set alert and prometheus secrets
oc_secret:
- state: "{{ state }}"
+ state: present
name: "{{ item }}-proxy"
namespace: "{{ openshift_prometheus_namespace }}"
contents:
@@ -24,7 +24,7 @@
# serviceaccount
- name: create prometheus serviceaccount
oc_serviceaccount:
- state: "{{ state }}"
+ state: present
name: prometheus
namespace: "{{ openshift_prometheus_namespace }}"
# TODO add annotations when supported
@@ -48,7 +48,7 @@
# create clusterrolebinding for prometheus serviceaccount
- name: Set cluster-reader permissions for prometheus
oc_adm_policy_user:
- state: "{{ state }}"
+ state: present
namespace: "{{ openshift_prometheus_namespace }}"
resource_kind: cluster-role
resource_name: cluster-reader
@@ -58,7 +58,7 @@
# TODO join into 1 task with loop
- name: Create prometheus service
oc_service:
- state: "{{ state }}"
+ state: present
name: "{{ item.name }}"
namespace: "{{ openshift_prometheus_namespace }}"
selector:
@@ -76,7 +76,7 @@
- name: Create alerts service
oc_service:
- state: "{{ state }}"
+ state: present
name: "{{ item.name }}"
namespace: "{{ openshift_prometheus_namespace }}"
selector:
@@ -111,7 +111,7 @@
# create prometheus and alerts routes
- name: create prometheus and alerts routes
oc_route:
- state: "{{ state }}"
+ state: present
name: "{{ item.name }}"
namespace: "{{ openshift_prometheus_namespace }}"
service_name: "{{ item.name }}"
@@ -185,7 +185,7 @@
# In prometheus configmap create "additional.rules" section if file exists
- name: Set prometheus configmap
oc_configmap:
- state: "{{ state }}"
+ state: present
name: "prometheus"
namespace: "{{ openshift_prometheus_namespace }}"
from_file:
@@ -196,7 +196,7 @@
- name: Set prometheus configmap
oc_configmap:
- state: "{{ state }}"
+ state: present
name: "prometheus"
namespace: "{{ openshift_prometheus_namespace }}"
from_file:
@@ -212,7 +212,7 @@
- name: Set alertmanager configmap
oc_configmap:
- state: "{{ state }}"
+ state: present
name: "prometheus-alerts"
namespace: "{{ openshift_prometheus_namespace }}"
from_file:
@@ -229,7 +229,7 @@
- name: Set prometheus stateful set
oc_obj:
- state: "{{ state }}"
+ state: present
name: "prometheus"
namespace: "{{ openshift_prometheus_namespace }}"
kind: statefulset
diff --git a/roles/openshift_prometheus/tasks/main.yaml b/roles/openshift_prometheus/tasks/main.yaml
index 5cc9a67eb..38798e1f5 100644
--- a/roles/openshift_prometheus/tasks/main.yaml
+++ b/roles/openshift_prometheus/tasks/main.yaml
@@ -20,9 +20,11 @@
mode: 0755
changed_when: False
-- include: install_prometheus.yaml
- vars:
- state: "{{ openshift_prometheus_state }}"
+- include_tasks: install_prometheus.yaml
+ when: openshift_prometheus_state == 'present'
+
+- include_tasks: uninstall_prometheus.yaml
+ when: openshift_prometheus_state == 'absent'
- name: Delete temp directory
file:
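
With this split, `openshift_prometheus_state` becomes the single install/uninstall toggle and the per-task `state` variable disappears. A hedged inventory sketch:

```
# Install (default):
openshift_prometheus_state: present
# Uninstall; runs uninstall_prometheus.yaml, which deletes the whole project:
openshift_prometheus_state: absent
```
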
diff --git a/roles/openshift_prometheus/tasks/uninstall_prometheus.yaml b/roles/openshift_prometheus/tasks/uninstall_prometheus.yaml
new file mode 100644
index 000000000..d746402db
--- /dev/null
+++ b/roles/openshift_prometheus/tasks/uninstall_prometheus.yaml
@@ -0,0 +1,7 @@
+---
+
+# remove namespace - This will delete all the objects inside the namespace
+- name: Remove prometheus project
+ oc_project:
+ state: absent
+ name: "{{ openshift_prometheus_namespace }}"
diff --git a/roles/openshift_prometheus/templates/prometheus.j2 b/roles/openshift_prometheus/templates/prometheus.j2
index 456db3a57..e73a94eee 100644
--- a/roles/openshift_prometheus/templates/prometheus.j2
+++ b/roles/openshift_prometheus/templates/prometheus.j2
@@ -75,8 +75,9 @@ spec:
- name: prometheus
args:
- - --storage.tsdb.retention=6h
- - --storage.tsdb.min-block-duration=2m
+{% for arg in openshift_prometheus_args %}
+ - {{ arg }}
+{% endfor %}
- --config.file=/etc/prometheus/prometheus.yml
- --web.listen-address=localhost:9090
image: "{{ l_openshift_prometheus_image_prefix }}prometheus:{{ l_openshift_prometheus_image_version }}"
diff --git a/roles/openshift_provisioners/tasks/install_provisioners.yaml b/roles/openshift_provisioners/tasks/install_provisioners.yaml
index 324fdcc82..2d1217c74 100644
--- a/roles/openshift_provisioners/tasks/install_provisioners.yaml
+++ b/roles/openshift_provisioners/tasks/install_provisioners.yaml
@@ -16,10 +16,10 @@
when: (openshift_provisioners_efs | bool) and openshift_provisioners_efs_aws_secret_access_key is not defined
- name: Install support
- include: install_support.yaml
+ include_tasks: install_support.yaml
- name: Install EFS
- include: install_efs.yaml
+ include_tasks: install_efs.yaml
when: openshift_provisioners_efs | bool
- find: paths={{ mktemp.stdout }}/templates patterns=*.yaml
@@ -32,7 +32,7 @@
changed_when: no
- name: Create objects
- include: oc_apply.yaml
+ include_tasks: oc_apply.yaml
vars:
- kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
- namespace: "{{ openshift_provisioners_project }}"
@@ -51,5 +51,5 @@
when: ansible_check_mode
- name: Scaling up cluster
- include: start_cluster.yaml
+ include_tasks: start_cluster.yaml
when: start_cluster | default(true) | bool
diff --git a/roles/openshift_provisioners/tasks/install_support.yaml b/roles/openshift_provisioners/tasks/install_support.yaml
index d6db81ab9..93c4c394d 100644
--- a/roles/openshift_provisioners/tasks/install_support.yaml
+++ b/roles/openshift_provisioners/tasks/install_support.yaml
@@ -10,8 +10,8 @@
changed_when: False
check_mode: no
-- include: generate_secrets.yaml
+- include_tasks: generate_secrets.yaml
-- include: generate_clusterrolebindings.yaml
+- include_tasks: generate_clusterrolebindings.yaml
-- include: generate_serviceaccounts.yaml
+- include_tasks: generate_serviceaccounts.yaml
diff --git a/roles/openshift_provisioners/tasks/main.yaml b/roles/openshift_provisioners/tasks/main.yaml
index a50c78c97..4ba26b2b8 100644
--- a/roles/openshift_provisioners/tasks/main.yaml
+++ b/roles/openshift_provisioners/tasks/main.yaml
@@ -12,10 +12,10 @@
check_mode: no
tags: provisioners_init
-- include: "{{ role_path }}/tasks/install_provisioners.yaml"
+- include_tasks: install_provisioners.yaml
when: openshift_provisioners_install_provisioners | default(false) | bool
-- include: "{{ role_path }}/tasks/uninstall_provisioners.yaml"
+- include_tasks: uninstall_provisioners.yaml
when: not openshift_provisioners_install_provisioners | default(false) | bool
- name: Delete temp directory
diff --git a/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml b/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml
index 0be4bc7d2..602dee773 100644
--- a/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml
+++ b/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml
@@ -1,6 +1,6 @@
---
- name: stop provisioners
- include: stop_cluster.yaml
+ include_tasks: stop_cluster.yaml
# delete the deployment objects that we had created
- name: delete provisioner api objects
diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml
index 95ba9fe4c..552a22a0f 100644
--- a/roles/openshift_repos/tasks/main.yaml
+++ b/roles/openshift_repos/tasks/main.yaml
@@ -35,7 +35,7 @@
- when: r_openshift_repos_has_run is not defined
block:
- - include: centos_repos.yml
+ - include_tasks: centos_repos.yml
when:
- ansible_os_family == "RedHat"
- ansible_distribution != "Fedora"
diff --git a/roles/openshift_sanitize_inventory/tasks/deprecations.yml b/roles/openshift_sanitize_inventory/tasks/deprecations.yml
index 94d3acffc..795b8ee60 100644
--- a/roles/openshift_sanitize_inventory/tasks/deprecations.yml
+++ b/roles/openshift_sanitize_inventory/tasks/deprecations.yml
@@ -16,6 +16,6 @@
# for with_fileglob Ansible resolves the path relative to the roles/<rolename>/files directory
- name: Assign deprecated variables to correct counterparts
- include: "{{ item }}"
+ include_tasks: "{{ item }}"
with_fileglob:
- "../tasks/__deprecations_*.yml"
diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml
index 70b236033..77428272c 100644
--- a/roles/openshift_sanitize_inventory/tasks/main.yml
+++ b/roles/openshift_sanitize_inventory/tasks/main.yml
@@ -1,7 +1,7 @@
---
# We should print out deprecations prior to any failures so that if a play does fail for other reasons
# the user would also be aware of any deprecated variables they should note to adjust
-- include: deprecations.yml
+- include_tasks: deprecations.yml
- name: Abort when conflicting deployment type variables are set
when:
@@ -53,7 +53,7 @@
openshift_release is "{{ openshift_release }}" which is not a valid version string.
Please set it to a version string like "3.4".
-- include: unsupported.yml
+- include_tasks: unsupported.yml
when:
- not openshift_enable_unsupported_configurations | default(false) | bool
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
index 03c157313..54adcf78d 100644
--- a/roles/openshift_storage_glusterfs/README.md
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -87,7 +87,9 @@ GlusterFS cluster into a new or existing OpenShift cluster:
| openshift_storage_glusterfs_block_deploy | True | Deploy glusterblock provisioner service
| openshift_storage_glusterfs_block_image | 'gluster/glusterblock-provisioner'| Container image to use for glusterblock-provisioner pod, enterprise default is 'rhgs3/rhgs-gluster-block-prov-rhel7'
| openshift_storage_glusterfs_block_version | 'latest' | Container image version to use for glusterblock-provisioner pod
-| openshift_storage_glusterfs_block_max_host_vol | 15 | Max number of GlusterFS volumes to host glusterblock volumes
+| openshift_storage_glusterfs_block_host_vol_create | True | Automatically create GlusterFS volumes to host glusterblock volumes. **NOTE:** If this is False, block-hosting volumes will need to be manually created before glusterblock volumes can be provisioned
+| openshift_storage_glusterfs_block_host_vol_size | 100 | Size, in GB, of GlusterFS volumes that will be automatically created to host glusterblock volumes if not enough space is available for a glusterblock volume creation request. **NOTE:** This value is effectively an upper limit on the size of glusterblock volumes unless you manually create larger GlusterFS block-hosting volumes
+| openshift_storage_glusterfs_block_host_vol_max | 15 | Max number of GlusterFS volumes to host glusterblock volumes
| openshift_storage_glusterfs_s3_deploy | True | Deploy gluster-s3 service
| openshift_storage_glusterfs_s3_image | 'gluster/gluster-object'| Container image to use for gluster-s3 pod, enterprise default is 'rhgs3/rhgs-gluster-s3-server-rhel7'
| openshift_storage_glusterfs_s3_version | 'latest' | Container image version to use for gluster-s3 pod
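
As an example, the three renamed block-hosting variables from the table above could be set together; a sketch using the documented defaults:

```
openshift_storage_glusterfs_block_host_vol_create: True
openshift_storage_glusterfs_block_host_vol_size: 100   # GB; effective upper bound on glusterblock volume size
openshift_storage_glusterfs_block_host_vol_max: 15
```
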
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
index c3db36d37..814d6ff28 100644
--- a/roles/openshift_storage_glusterfs/defaults/main.yml
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -10,7 +10,9 @@ openshift_storage_glusterfs_version: 'latest'
openshift_storage_glusterfs_block_deploy: True
openshift_storage_glusterfs_block_image: "{{ 'rhgs3/rhgs-gluster-block-prov-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/glusterblock-provisioner' | quote }}"
openshift_storage_glusterfs_block_version: 'latest'
-openshift_storage_glusterfs_block_max_host_vol: 15
+openshift_storage_glusterfs_block_host_vol_create: True
+openshift_storage_glusterfs_block_host_vol_size: 100
+openshift_storage_glusterfs_block_host_vol_max: 15
openshift_storage_glusterfs_s3_deploy: True
openshift_storage_glusterfs_s3_image: "{{ 'rhgs3/rhgs-gluster-s3-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-object' | quote }}"
openshift_storage_glusterfs_s3_version: 'latest'
@@ -54,7 +56,9 @@ openshift_storage_glusterfs_registry_version: "{{ openshift_storage_glusterfs_ve
openshift_storage_glusterfs_registry_block_deploy: "{{ openshift_storage_glusterfs_block_deploy }}"
openshift_storage_glusterfs_registry_block_image: "{{ openshift_storage_glusterfs_block_image }}"
openshift_storage_glusterfs_registry_block_version: "{{ openshift_storage_glusterfs_block_version }}"
-openshift_storage_glusterfs_registry_block_max_host_vol: "{{ openshift_storage_glusterfs_block_max_host_vol }}"
+openshift_storage_glusterfs_registry_block_host_vol_create: "{{ openshift_storage_glusterfs_block_host_vol_create }}"
+openshift_storage_glusterfs_registry_block_host_vol_size: "{{ openshift_storage_glusterfs_block_host_vol_size }}"
+openshift_storage_glusterfs_registry_block_host_vol_max: "{{ openshift_storage_glusterfs_block_host_vol_max }}"
openshift_storage_glusterfs_registry_s3_deploy: "{{ openshift_storage_glusterfs_s3_deploy }}"
openshift_storage_glusterfs_registry_s3_image: "{{ openshift_storage_glusterfs_s3_image }}"
openshift_storage_glusterfs_registry_s3_version: "{{ openshift_storage_glusterfs_s3_version }}"
diff --git a/roles/openshift_storage_glusterfs/files/v3.7/glusterblock-provisioner.yml b/roles/openshift_storage_glusterfs/files/v3.7/glusterblock-provisioner.yml
index 2cc69644c..9c1409dee 100644
--- a/roles/openshift_storage_glusterfs/files/v3.7/glusterblock-provisioner.yml
+++ b/roles/openshift_storage_glusterfs/files/v3.7/glusterblock-provisioner.yml
@@ -2,7 +2,7 @@
kind: Template
apiVersion: v1
metadata:
- name: glusterblock
+ name: glusterblock-provisioner
labels:
glusterfs: block-template
glusterblock: template
diff --git a/roles/openshift_storage_glusterfs/files/v3.8/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.8/deploy-heketi-template.yml
new file mode 100644
index 000000000..34af652c2
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.8/deploy-heketi-template.yml
@@ -0,0 +1,133 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: heketi-template
+ deploy-heketi: support
+ annotations:
+ description: Bootstrap Heketi installation
+ tags: glusterfs,heketi,installation
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-service
+ deploy-heketi: support
+ annotations:
+ description: Exposes Heketi service
+ spec:
+ ports:
+ - name: deploy-heketi-${CLUSTER_NAME}
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: ${HEKETI_ROUTE}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-route
+ deploy-heketi: support
+ spec:
+ to:
+ kind: Service
+ name: deploy-heketi-${CLUSTER_NAME}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-dc
+ deploy-heketi: support
+ annotations:
+ description: Defines how to deploy Heketi
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+ deploy-heketi: support
+ spec:
+ serviceAccountName: heketi-${CLUSTER_NAME}-service-account
+ containers:
+ - name: heketi
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ env:
+ - name: HEKETI_USER_KEY
+ value: ${HEKETI_USER_KEY}
+ - name: HEKETI_ADMIN_KEY
+ value: ${HEKETI_ADMIN_KEY}
+ - name: HEKETI_EXECUTOR
+ value: ${HEKETI_EXECUTOR}
+ - name: HEKETI_FSTAB
+ value: ${HEKETI_FSTAB}
+ - name: HEKETI_SNAPSHOT_LIMIT
+ value: '14'
+ - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+ value: '1'
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/heketi
+ - name: config
+ mountPath: /etc/heketi
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 3
+ httpGet:
+ path: /hello
+ port: 8080
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 30
+ httpGet:
+ path: /hello
+ port: 8080
+ volumes:
+ - name: db
+ - name: config
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-config-secret
+parameters:
+- name: HEKETI_USER_KEY
+ displayName: Heketi User Secret
+ description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+ displayName: Heketi Administrator Secret
+ description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_EXECUTOR
+ displayName: heketi executor type
+ description: Set the executor type, kubernetes or ssh
+ value: kubernetes
+- name: HEKETI_FSTAB
+ displayName: heketi fstab path
+ description: Set the fstab path, file that is populated with bricks that heketi creates
+ value: /var/lib/heketi/fstab
+- name: HEKETI_ROUTE
+ displayName: heketi route name
+ description: Set the hostname for the route URL
+ value: "heketi-glusterfs"
+- name: IMAGE_NAME
+ displayName: heketi container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: heketi container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify this heketi service, useful for running multiple heketi instances
+ value: glusterfs
diff --git a/roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-pvcs-template.yml b/roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-pvcs-template.yml
new file mode 100644
index 000000000..064b51473
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-pvcs-template.yml
@@ -0,0 +1,67 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: gluster-s3-pvcs
+ labels:
+ glusterfs: s3-pvcs-template
+ gluster-s3: pvcs-template
+ annotations:
+ description: Gluster S3 service template
+ tags: glusterfs,heketi,gluster-s3
+objects:
+- kind: PersistentVolumeClaim
+ apiVersion: v1
+ metadata:
+ name: "${PVC}"
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-storage
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-pvc
+ annotations:
+ volume.beta.kubernetes.io/storage-class: "glusterfs-${CLUSTER_NAME}"
+ spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: "${PVC_SIZE}"
+- kind: PersistentVolumeClaim
+ apiVersion: v1
+ metadata:
+ name: "${META_PVC}"
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-storage
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-meta-pvc
+ annotations:
+ volume.beta.kubernetes.io/storage-class: "glusterfs-${CLUSTER_NAME}"
+ spec:
+ accessModes:
+ - ReadWriteMany
+ resources:
+ requests:
+ storage: "${META_PVC_SIZE}"
+parameters:
+- name: S3_ACCOUNT
+ displayName: S3 Account Name
+ description: S3 storage account which will provide storage on GlusterFS volumes
+ required: true
+- name: PVC
+ displayName: Primary GlusterFS-backed PVC
+ description: GlusterFS-backed PVC for object storage
+ required: true
+- name: PVC_SIZE
+ displayName: Primary GlusterFS-backed PVC capacity
+ description: Capacity for GlusterFS-backed PVC for object storage
+ value: 2Gi
+- name: META_PVC
+ displayName: Metadata GlusterFS-backed PVC
+ description: GlusterFS-backed PVC for object storage metadata
+ required: true
+- name: META_PVC_SIZE
+ displayName: Metadata GlusterFS-backed PVC capacity
+ description: Capacity for GlusterFS-backed PVC for object storage metadata
+ value: 1Gi
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
diff --git a/roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-template.yml b/roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-template.yml
new file mode 100644
index 000000000..896a1b226
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.8/gluster-s3-template.yml
@@ -0,0 +1,140 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: gluster-s3
+ labels:
+ glusterfs: s3-template
+ gluster-s3: template
+ annotations:
+ description: Gluster S3 service template
+ tags: glusterfs,heketi,gluster-s3
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-service
+ spec:
+ ports:
+ - protocol: TCP
+ port: 8080
+ targetPort: 8080
+ selector:
+      glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+ type: ClusterIP
+ sessionAffinity: None
+ status:
+ loadBalancer: {}
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-route
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-route
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-route
+ spec:
+ to:
+ kind: Service
+ name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-service
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: gluster-s3-${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-dc
+ annotations:
+ openshift.io/scc: privileged
+ description: Defines how to deploy gluster s3 object storage
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+ template:
+ metadata:
+ name: gluster-${CLUSTER_NAME}-${S3_ACCOUNT}-s3
+ labels:
+ glusterfs: s3-${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+ gluster-s3: ${CLUSTER_NAME}-${S3_ACCOUNT}-pod
+ spec:
+ containers:
+ - name: gluster-s3
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ ports:
+ - name: gluster
+ containerPort: 8080
+ protocol: TCP
+ env:
+ - name: S3_ACCOUNT
+ value: "${S3_ACCOUNT}"
+ - name: S3_USER
+ value: "${S3_USER}"
+ - name: S3_PASSWORD
+ value: "${S3_PASSWORD}"
+ resources: {}
+ volumeMounts:
+ - name: gluster-vol1
+ mountPath: "/mnt/gluster-object/${S3_ACCOUNT}"
+ - name: gluster-vol2
+ mountPath: "/mnt/gluster-object/gsmetadata"
+ - name: glusterfs-cgroup
+ readOnly: true
+ mountPath: "/sys/fs/cgroup"
+ terminationMessagePath: "/dev/termination-log"
+ securityContext:
+ privileged: true
+ volumes:
+ - name: glusterfs-cgroup
+ hostPath:
+ path: "/sys/fs/cgroup"
+ - name: gluster-vol1
+ persistentVolumeClaim:
+ claimName: ${PVC}
+ - name: gluster-vol2
+ persistentVolumeClaim:
+ claimName: ${META_PVC}
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ serviceAccountName: default
+ serviceAccount: default
+ securityContext: {}
+parameters:
+- name: IMAGE_NAME
+  displayName: gluster-s3 container image name
+  required: True
+- name: IMAGE_VERSION
+  displayName: gluster-s3 container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
+- name: S3_ACCOUNT
+ displayName: S3 Account Name
+ description: S3 storage account which will provide storage on GlusterFS volumes
+ required: true
+- name: S3_USER
+ displayName: S3 User
+ description: S3 user who can access the S3 storage account
+ required: true
+- name: S3_PASSWORD
+ displayName: S3 User Password
+ description: Password for the S3 user
+ required: true
+- name: PVC
+ displayName: Primary GlusterFS-backed PVC
+ description: GlusterFS-backed PVC for object storage
+ value: gluster-s3-claim
+- name: META_PVC
+ displayName: Metadata GlusterFS-backed PVC
+ description: GlusterFS-backed PVC for object storage metadata
+ value: gluster-s3-meta-claim
diff --git a/roles/openshift_storage_glusterfs/files/v3.8/glusterblock-provisioner.yml b/roles/openshift_storage_glusterfs/files/v3.8/glusterblock-provisioner.yml
new file mode 100644
index 000000000..9c1409dee
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.8/glusterblock-provisioner.yml
@@ -0,0 +1,105 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: glusterblock-provisioner
+ labels:
+ glusterfs: block-template
+ glusterblock: template
+ annotations:
+ description: glusterblock provisioner template
+ tags: glusterfs
+objects:
+- kind: ClusterRole
+ apiVersion: v1
+ metadata:
+ name: glusterblock-provisioner-runner
+ labels:
+ glusterfs: block-provisioner-runner-clusterrole
+ glusterblock: provisioner-runner-clusterrole
+ rules:
+ - apiGroups: [""]
+ resources: ["persistentvolumes"]
+ verbs: ["get", "list", "watch", "create", "delete"]
+ - apiGroups: [""]
+ resources: ["persistentvolumeclaims"]
+ verbs: ["get", "list", "watch", "update"]
+ - apiGroups: ["storage.k8s.io"]
+ resources: ["storageclasses"]
+ verbs: ["get", "list", "watch"]
+ - apiGroups: [""]
+ resources: ["events"]
+ verbs: ["list", "watch", "create", "update", "patch"]
+ - apiGroups: [""]
+ resources: ["services"]
+ verbs: ["get"]
+ - apiGroups: [""]
+ resources: ["secrets"]
+ verbs: ["get", "create", "delete"]
+ - apiGroups: [""]
+ resources: ["routes"]
+ verbs: ["get", "list"]
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: glusterblock-${CLUSTER_NAME}-provisioner
+ labels:
+ glusterfs: block-${CLUSTER_NAME}-provisioner-sa
+ glusterblock: ${CLUSTER_NAME}-provisioner-sa
+- apiVersion: v1
+ kind: ClusterRoleBinding
+ metadata:
+ name: glusterblock-${CLUSTER_NAME}-provisioner
+ roleRef:
+ name: glusterblock-provisioner-runner
+ subjects:
+ - kind: ServiceAccount
+ name: glusterblock-${CLUSTER_NAME}-provisioner
+ namespace: ${NAMESPACE}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: glusterblock-${CLUSTER_NAME}-provisioner-dc
+ labels:
+ glusterfs: block-${CLUSTER_NAME}-provisioner-dc
+ glusterblock: ${CLUSTER_NAME}-provisioner-dc
+ annotations:
+ description: Defines how to deploy the glusterblock provisioner pod.
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: block-${CLUSTER_NAME}-provisioner-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: glusterblock-provisioner
+ labels:
+ glusterfs: block-${CLUSTER_NAME}-provisioner-pod
+ spec:
+ serviceAccountName: glusterblock-${CLUSTER_NAME}-provisioner
+ containers:
+ - name: glusterblock-provisioner
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: PROVISIONER_NAME
+ value: gluster.org/glusterblock
+parameters:
+- name: IMAGE_NAME
+ displayName: glusterblock provisioner container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: glusterblock provisioner container image version
+ required: True
+- name: NAMESPACE
+ displayName: glusterblock provisioner namespace
+ description: The namespace in which these resources are being created
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
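The role consumes this template through the oc_process module, as the glusterblock_deploy.yml change later in this diff shows. A minimal sketch of such a task; the namespace and image values here are illustrative, not the role's defaults:

- name: Deploy glusterblock provisioner (sketch)
  oc_process:
    namespace: glusterfs                             # hypothetical namespace
    template_name: glusterblock-provisioner
    create: True
    params:
      IMAGE_NAME: gluster/glusterblock-provisioner   # illustrative value
      IMAGE_VERSION: latest                          # illustrative value
      NAMESPACE: glusterfs
      CLUSTER_NAME: storage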
diff --git a/roles/openshift_storage_glusterfs/files/v3.8/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v3.8/glusterfs-template.yml
new file mode 100644
index 000000000..09850a2c2
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.8/glusterfs-template.yml
@@ -0,0 +1,154 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: glusterfs
+ labels:
+ glusterfs: template
+ annotations:
+ description: GlusterFS DaemonSet template
+ tags: glusterfs
+objects:
+- kind: DaemonSet
+ apiVersion: extensions/v1beta1
+ metadata:
+ name: glusterfs-${CLUSTER_NAME}
+ labels:
+ glusterfs: ${CLUSTER_NAME}-daemonset
+ annotations:
+ description: GlusterFS DaemonSet
+ tags: glusterfs
+ spec:
+ selector:
+ matchLabels:
+ glusterfs: ${CLUSTER_NAME}-pod
+ template:
+ metadata:
+ name: glusterfs-${CLUSTER_NAME}
+ labels:
+ glusterfs: ${CLUSTER_NAME}-pod
+ glusterfs-node: pod
+ spec:
+ nodeSelector: "${{NODE_LABELS}}"
+ hostNetwork: true
+ containers:
+ - name: glusterfs
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: GB_GLFS_LRU_COUNT
+ value: "${GB_GLFS_LRU_COUNT}"
+ - name: TCMU_LOGDIR
+ value: "${TCMU_LOGDIR}"
+ resources:
+ requests:
+ memory: 100Mi
+ cpu: 100m
+ volumeMounts:
+ - name: glusterfs-heketi
+ mountPath: "/var/lib/heketi"
+ - name: glusterfs-run
+ mountPath: "/run"
+ - name: glusterfs-lvm
+ mountPath: "/run/lvm"
+ - name: glusterfs-etc
+ mountPath: "/etc/glusterfs"
+ - name: glusterfs-logs
+ mountPath: "/var/log/glusterfs"
+ - name: glusterfs-config
+ mountPath: "/var/lib/glusterd"
+ - name: glusterfs-dev
+ mountPath: "/dev"
+ - name: glusterfs-misc
+ mountPath: "/var/lib/misc/glusterfsd"
+ - name: glusterfs-cgroup
+ mountPath: "/sys/fs/cgroup"
+ readOnly: true
+ - name: glusterfs-ssl
+ mountPath: "/etc/ssl"
+ readOnly: true
+ securityContext:
+ capabilities: {}
+ privileged: true
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 40
+ exec:
+ command:
+ - "/bin/bash"
+ - "-c"
+ - systemctl status glusterd.service
+ periodSeconds: 25
+ successThreshold: 1
+ failureThreshold: 15
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 40
+ exec:
+ command:
+ - "/bin/bash"
+ - "-c"
+ - systemctl status glusterd.service
+ periodSeconds: 25
+ successThreshold: 1
+ failureThreshold: 15
+ terminationMessagePath: "/dev/termination-log"
+ volumes:
+ - name: glusterfs-heketi
+ hostPath:
+ path: "/var/lib/heketi"
+ - name: glusterfs-run
+ emptyDir: {}
+ - name: glusterfs-lvm
+ hostPath:
+ path: "/run/lvm"
+ - name: glusterfs-etc
+ hostPath:
+ path: "/etc/glusterfs"
+ - name: glusterfs-logs
+ hostPath:
+ path: "/var/log/glusterfs"
+ - name: glusterfs-config
+ hostPath:
+ path: "/var/lib/glusterd"
+ - name: glusterfs-dev
+ hostPath:
+ path: "/dev"
+ - name: glusterfs-misc
+ hostPath:
+ path: "/var/lib/misc/glusterfsd"
+ - name: glusterfs-cgroup
+ hostPath:
+ path: "/sys/fs/cgroup"
+ - name: glusterfs-ssl
+ hostPath:
+ path: "/etc/ssl"
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+parameters:
+- name: NODE_LABELS
+ displayName: Daemonset Node Labels
+ description: Labels which define the daemonset node selector. Must contain at least one label of the format 'glusterfs=<CLUSTER_NAME>-host'
+ value: '{ "glusterfs": "storage-host" }'
+- name: IMAGE_NAME
+ displayName: GlusterFS container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: GlusterFS container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
+- name: GB_GLFS_LRU_COUNT
+ displayName: Maximum number of block hosting volumes
+ description: Sets the maximum number of block hosting volumes.
+ value: "15"
+ required: true
+- name: TCMU_LOGDIR
+ displayName: Tcmu runner log directory
+ description: Sets the tcmu-runner log directory.
+ value: "/var/log/glusterfs/gluster-block"
+ required: true
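The DaemonSet only schedules onto nodes matching the NODE_LABELS selector, so storage nodes need a matching label before the template is processed. A sketch using the repo's oc_label module, with a hypothetical node name:

- name: Label a storage node to match the default selector (sketch)
  oc_label:
    name: node1.example.com      # hypothetical node
    kind: node
    state: add
    labels:
      - key: glusterfs
        value: storage-host      # matches the default NODE_LABELS value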
diff --git a/roles/openshift_storage_glusterfs/files/v3.8/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.8/heketi-template.yml
new file mode 100644
index 000000000..28cdb2982
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v3.8/heketi-template.yml
@@ -0,0 +1,136 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: heketi
+ labels:
+ glusterfs: heketi-template
+ annotations:
+ description: Heketi service deployment template
+ tags: glusterfs,heketi
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-service
+ heketi: ${CLUSTER_NAME}-service
+ annotations:
+ description: Exposes Heketi service
+ spec:
+ ports:
+ - name: heketi
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: ${HEKETI_ROUTE}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-route
+ heketi: ${CLUSTER_NAME}-route
+ spec:
+ to:
+ kind: Service
+ name: heketi-${CLUSTER_NAME}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-dc
+ heketi: ${CLUSTER_NAME}-dc
+ annotations:
+ description: Defines how to deploy Heketi
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+ heketi: ${CLUSTER_NAME}-pod
+ spec:
+ serviceAccountName: heketi-${CLUSTER_NAME}-service-account
+ containers:
+ - name: heketi
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: HEKETI_USER_KEY
+ value: ${HEKETI_USER_KEY}
+ - name: HEKETI_ADMIN_KEY
+ value: ${HEKETI_ADMIN_KEY}
+ - name: HEKETI_EXECUTOR
+ value: ${HEKETI_EXECUTOR}
+ - name: HEKETI_FSTAB
+ value: ${HEKETI_FSTAB}
+ - name: HEKETI_SNAPSHOT_LIMIT
+ value: '14'
+ - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+ value: '1'
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/heketi
+ - name: config
+ mountPath: /etc/heketi
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 3
+ httpGet:
+ path: /hello
+ port: 8080
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 30
+ httpGet:
+ path: /hello
+ port: 8080
+ volumes:
+ - name: db
+ glusterfs:
+ endpoints: heketi-db-${CLUSTER_NAME}-endpoints
+ path: heketidbstorage
+ - name: config
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-config-secret
+parameters:
+- name: HEKETI_USER_KEY
+ displayName: Heketi User Secret
+ description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+ displayName: Heketi Administrator Secret
+ description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_EXECUTOR
+ displayName: heketi executor type
+ description: Set the executor type, kubernetes or ssh
+ value: kubernetes
+- name: HEKETI_FSTAB
+ displayName: heketi fstab path
+ description: Set the fstab path, the file heketi populates with the bricks it creates
+ value: /var/lib/heketi/fstab
+- name: HEKETI_ROUTE
+ displayName: heketi route name
+ description: Set the hostname for the route URL
+ value: "heketi-glusterfs"
+- name: IMAGE_NAME
+ displayName: heketi container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: heketi container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify this heketi service, useful for running multiple heketi instances
+ value: glusterfs
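The deployment mounts a secret named heketi-${CLUSTER_NAME}-config-secret at /etc/heketi; the role is assumed to build it from the rendered heketi.json shown later in this diff. A hand-written equivalent, purely illustrative and trimmed:

apiVersion: v1
kind: Secret
metadata:
  name: heketi-storage-config-secret   # heketi-${CLUSTER_NAME}-config-secret with CLUSTER_NAME=storage
type: Opaque
stringData:
  heketi.json: |
    { "port": "8080", "use_auth": false }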
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml
index bba1de654..d6be8c726 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml
@@ -29,21 +29,21 @@
src: "{{ openshift.common.examples_content_version }}/{{ item }}"
dest: "{{ mktemp.stdout }}/{{ item }}"
with_items:
- - "glusterblock-template.yml"
+ - "glusterblock-provisioner.yml"
- name: Create glusterblock provisioner template
oc_obj:
namespace: "{{ glusterfs_namespace }}"
kind: template
- name: "glusterblock"
+ name: "glusterblock-provisioner"
state: present
files:
- - "{{ mktemp.stdout }}/glusterblock-template.yml"
+ - "{{ mktemp.stdout }}/glusterblock-provisioner.yml"
- name: Deploy glusterblock provisioner
oc_process:
namespace: "{{ glusterfs_namespace }}"
- template_name: "glusterblock"
+ template_name: "glusterblock-provisioner"
create: True
params:
IMAGE_NAME: "{{ glusterfs_block_image }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
index e2d740f35..1ede0ae94 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
@@ -12,7 +12,9 @@
glusterfs_block_deploy: "{{ openshift_storage_glusterfs_block_deploy | bool }}"
glusterfs_block_image: "{{ openshift_storage_glusterfs_block_image }}"
glusterfs_block_version: "{{ openshift_storage_glusterfs_block_version }}"
- glusterfs_block_max_host_vol: "{{ openshift_storage_glusterfs_block_max_host_vol }}"
+ glusterfs_block_host_vol_create: "{{ openshift_storage_glusterfs_block_host_vol_create }}"
+ glusterfs_block_host_vol_size: "{{ openshift_storage_glusterfs_block_host_vol_size }}"
+ glusterfs_block_host_vol_max: "{{ openshift_storage_glusterfs_block_host_vol_max }}"
glusterfs_s3_deploy: "{{ openshift_storage_glusterfs_s3_deploy | bool }}"
glusterfs_s3_image: "{{ openshift_storage_glusterfs_s3_image }}"
glusterfs_s3_version: "{{ openshift_storage_glusterfs_s3_version }}"
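The three new facts map one-to-one onto inventory variables of the same name. A sketch of setting them in group_vars; the values are illustrative, not necessarily the role defaults:

openshift_storage_glusterfs_block_host_vol_create: True
openshift_storage_glusterfs_block_host_vol_size: 100   # GB, illustrative
openshift_storage_glusterfs_block_host_vol_max: 15     # illustrative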
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
index f98d4c62f..ef37762f9 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml
@@ -87,7 +87,7 @@
IMAGE_VERSION: "{{ glusterfs_version }}"
NODE_LABELS: "{{ glusterfs_nodeselector }}"
CLUSTER_NAME: "{{ glusterfs_name }}"
- GB_GLFS_LRU_COUNT: "{{ glusterfs_block_max_host_vol }}"
+ GB_GLFS_LRU_COUNT: "{{ glusterfs_block_host_vol_max }}"
- name: Wait for GlusterFS pods
oc_obj:
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
index baac52179..1fa42efa7 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -12,7 +12,9 @@
glusterfs_block_deploy: "{{ openshift_storage_glusterfs_registry_block_deploy | bool }}"
glusterfs_block_image: "{{ openshift_storage_glusterfs_registry_block_image }}"
glusterfs_block_version: "{{ openshift_storage_glusterfs_registry_block_version }}"
- glusterfs_block_max_host_vol: "{{ openshift_storage_glusterfs_registry_block_max_host_vol }}"
+ glusterfs_block_host_vol_create: "{{ openshift_storage_glusterfs_registry_block_host_vol_create }}"
+ glusterfs_block_host_vol_size: "{{ openshift_storage_glusterfs_registry_block_host_vol_size }}"
+ glusterfs_block_host_vol_max: "{{ openshift_storage_glusterfs_registry_block_host_vol_max }}"
glusterfs_s3_deploy: "{{ openshift_storage_glusterfs_registry_s3_deploy | bool }}"
glusterfs_s3_image: "{{ openshift_storage_glusterfs_registry_s3_image }}"
glusterfs_s3_version: "{{ openshift_storage_glusterfs_registry_s3_version }}"
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2
index 579b11bb7..565e9be98 100644
--- a/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/heketi.json.j2
@@ -31,6 +31,12 @@
"port" : "{{ glusterfs_heketi_ssh_port }}",
"user" : "{{ glusterfs_heketi_ssh_user }}",
"sudo" : {{ glusterfs_heketi_ssh_sudo | lower }}
- }
+ },
+
+ "_auto_create_block_hosting_volume": "Creates Block Hosting volumes automatically if not found or exsisting volume exhausted",
+ "auto_create_block_hosting_volume": {{ glusterfs_block_host_vol_create | lower }},
+
+ "_block_hosting_volume_size": "New block hosting volume will be created in size mentioned, This is considered only if auto-create is enabled.",
+ "block_hosting_volume_size": {{ glusterfs_block_host_vol_size }}
}
}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-endpoints.yml.j2
new file mode 100644
index 000000000..11c9195bb
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: glusterfs-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+ - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+ ports:
+ - port: 1
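Rendered with two hypothetical storage nodes and glusterfs_name set to registry, this template would produce:

apiVersion: v1
kind: Endpoints
metadata:
  name: glusterfs-registry-endpoints
subsets:
- addresses:
  - ip: 192.168.10.11   # hypothetical node IPs
  - ip: 192.168.10.12
  ports:
  - port: 1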
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-service.yml.j2
new file mode 100644
index 000000000..3f869d2b7
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-registry-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: glusterfs-{{ glusterfs_name }}-endpoints
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2
new file mode 100644
index 000000000..095fb780f
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/glusterfs-storageclass.yml.j2
@@ -0,0 +1,13 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+ name: glusterfs-{{ glusterfs_name }}
+provisioner: kubernetes.io/glusterfs
+parameters:
+ resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
+ restuser: "admin"
+{% if glusterfs_heketi_admin_key is defined %}
+ secretNamespace: "{{ glusterfs_namespace }}"
+ secretName: "heketi-{{ glusterfs_name }}-admin-secret"
+{%- endif -%}
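A claim against the resulting class, assuming glusterfs_name is "storage"; the claim name and size are hypothetical:

apiVersion: v1
kind: PersistentVolumeClaim
metadata:
  name: demo-claim
spec:
  storageClassName: glusterfs-storage   # glusterfs-{{ glusterfs_name }}
  accessModes:
    - ReadWriteOnce
  resources:
    requests:
      storage: 5Gi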
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/heketi-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/heketi-endpoints.yml.j2
new file mode 100644
index 000000000..99cbdf748
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/heketi-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: heketi-db-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+ - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+ ports:
+ - port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/heketi-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/heketi-service.yml.j2
new file mode 100644
index 000000000..dcb896441
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/heketi-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: heketi-db-{{ glusterfs_name }}-endpoints
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/heketi.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/heketi.json.j2
new file mode 100644
index 000000000..565e9be98
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/heketi.json.j2
@@ -0,0 +1,42 @@
+{
+ "_port_comment": "Heketi Server Port Number",
+ "port" : "8080",
+
+ "_use_auth": "Enable JWT authorization. Please enable for deployment",
+ "use_auth" : false,
+
+ "_jwt" : "Private keys for access",
+ "jwt" : {
+ "_admin" : "Admin has access to all APIs",
+ "admin" : {
+ "key" : "My Secret"
+ },
+ "_user" : "User only has access to /volumes endpoint",
+ "user" : {
+ "key" : "My Secret"
+ }
+ },
+
+ "_glusterfs_comment": "GlusterFS Configuration",
+ "glusterfs" : {
+
+ "_executor_comment": "Execute plugin. Possible choices: mock, kubernetes, ssh",
+ "executor" : "{{ glusterfs_heketi_executor }}",
+
+ "_db_comment": "Database file name",
+ "db" : "/var/lib/heketi/heketi.db",
+
+ "sshexec" : {
+ "keyfile" : "/etc/heketi/private_key",
+ "port" : "{{ glusterfs_heketi_ssh_port }}",
+ "user" : "{{ glusterfs_heketi_ssh_user }}",
+ "sudo" : {{ glusterfs_heketi_ssh_sudo | lower }}
+ },
+
+ "_auto_create_block_hosting_volume": "Creates Block Hosting volumes automatically if not found or exsisting volume exhausted",
+ "auto_create_block_hosting_volume": {{ glusterfs_block_host_vol_create | lower }},
+
+ "_block_hosting_volume_size": "New block hosting volume will be created in size mentioned, This is considered only if auto-create is enabled.",
+ "block_hosting_volume_size": {{ glusterfs_block_host_vol_size }}
+ }
+}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/topology.json.j2
new file mode 100644
index 000000000..d6c28f6dd
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/topology.json.j2
@@ -0,0 +1,49 @@
+{
+ "clusters": [
+{%- set clusters = {} -%}
+{%- for node in glusterfs_nodes -%}
+  {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in hostvars[node] else '1' -%}
+ {%- if cluster in clusters -%}
+ {%- set _dummy = clusters[cluster].append(node) -%}
+ {%- else -%}
+ {%- set _dummy = clusters.update({cluster: [ node, ]}) -%}
+ {%- endif -%}
+{%- endfor -%}
+{%- for cluster in clusters -%}
+ {
+ "nodes": [
+{%- for node in clusters[cluster] -%}
+ {
+ "node": {
+ "hostnames": {
+ "manage": [
+{%- if 'glusterfs_hostname' in hostvars[node] -%}
+ "{{ hostvars[node].glusterfs_hostname }}"
+{%- elif 'openshift' in hostvars[node] -%}
+ "{{ hostvars[node].openshift.node.nodename }}"
+{%- else -%}
+ "{{ node }}"
+{%- endif -%}
+ ],
+ "storage": [
+{%- if 'glusterfs_ip' in hostvars[node] -%}
+ "{{ hostvars[node].glusterfs_ip }}"
+{%- else -%}
+ "{{ hostvars[node].openshift.common.ip }}"
+{%- endif -%}
+ ]
+ },
+ "zone": {{ hostvars[node].glusterfs_zone | default(1) }}
+ },
+ "devices": [
+{%- for device in hostvars[node].glusterfs_devices -%}
+ "{{ device }}"{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+ }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+ }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+}
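The template is driven entirely by per-host inventory variables. A sketch of host_vars for one storage node, with hypothetical values, showing everything a single node entry draws on:

glusterfs_cluster: '1'                  # optional; nodes sharing a value land in the same cluster
glusterfs_hostname: node1.example.com   # falls back to openshift.node.nodename, then the inventory name
glusterfs_ip: 192.168.10.11             # falls back to openshift.common.ip
glusterfs_zone: 1                       # defaults to 1
glusterfs_devices:
  - /dev/sdb
  - /dev/sdc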
diff --git a/roles/openshift_storage_nfs/tasks/main.yml b/roles/openshift_storage_nfs/tasks/main.yml
index c4e023c1e..24264fa43 100644
--- a/roles/openshift_storage_nfs/tasks/main.yml
+++ b/roles/openshift_storage_nfs/tasks/main.yml
@@ -1,7 +1,6 @@
---
- name: setup firewall
- include: firewall.yml
- static: yes
+ import_tasks: firewall.yml
- name: Install nfs-utils
package: name=nfs-utils state=present
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index 1c8b9046c..4f9158ade 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -93,11 +93,11 @@
- inventory_hostname in groups['oo_masters_to_config'] or inventory_hostname in groups['oo_nodes_to_config']
block:
- name: Set openshift_version for rpm installation
- include: set_version_rpm.yml
+ include_tasks: set_version_rpm.yml
when: not is_containerized | bool
- name: Set openshift_version for containerized installation
- include: set_version_containerized.yml
+ include_tasks: set_version_containerized.yml
when: is_containerized | bool
- block:
diff --git a/roles/os_firewall/tasks/main.yml b/roles/os_firewall/tasks/main.yml
index c477d386c..99084cd3f 100644
--- a/roles/os_firewall/tasks/main.yml
+++ b/roles/os_firewall/tasks/main.yml
@@ -8,12 +8,12 @@
set_fact:
r_os_firewall_is_atomic: "{{ r_os_firewall_ostree_booted.stat.exists }}"
-- include: firewalld.yml
+- include_tasks: firewalld.yml
when:
- os_firewall_enabled | bool
- os_firewall_use_firewalld | bool
-- include: iptables.yml
+- include_tasks: iptables.yml
when:
- os_firewall_enabled | bool
- not os_firewall_use_firewalld | bool
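These conversions track Ansible 2.4's split of the old include keyword into static imports and dynamic includes. A compact sketch of the distinction, with placeholder file names:

- import_tasks: firewall.yml     # resolved at parse time, like the old include with static: yes
- include_tasks: firewalld.yml   # resolved at runtime; the when: gates the include itself
  when: os_firewall_use_firewalld | bool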