Diffstat (limited to 'playbooks/byo')
-rw-r--r--  playbooks/byo/openshift-cfme/config.yml | 8
-rw-r--r--  playbooks/byo/openshift-cfme/uninstall.yml | 6
-rw-r--r--  playbooks/byo/openshift-checks/README.md | 104
-rw-r--r--  playbooks/byo/openshift-checks/adhoc.yml | 27
-rw-r--r--  playbooks/byo/openshift-checks/certificate_expiry/default.yaml | 10
-rw-r--r--  playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml | 40
-rw-r--r--  playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml | 18
-rw-r--r--  playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml | 12
-rw-r--r--  playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml | 16
-rw-r--r--  playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml | 13
-rw-r--r--  playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml | 12
l---------  playbooks/byo/openshift-checks/certificate_expiry/roles | 1
-rw-r--r--  playbooks/byo/openshift-checks/health.yml | 6
-rw-r--r--  playbooks/byo/openshift-checks/pre-install.yml | 6
-rw-r--r--  playbooks/byo/openshift-cluster/cluster_hosts.yml | 9
-rw-r--r--  playbooks/byo/openshift-cluster/config.yml | 5
-rw-r--r--  playbooks/byo/openshift-cluster/enable_dnsmasq.yml | 24
-rw-r--r--  playbooks/byo/openshift-cluster/initialize_groups.yml | 10
-rw-r--r--  playbooks/byo/openshift-cluster/openshift-logging.yml | 21
-rw-r--r--  playbooks/byo/openshift-cluster/openshift-metrics.yml | 10
-rw-r--r--  playbooks/byo/openshift-cluster/openshift-provisioners.yml | 6
-rw-r--r--  playbooks/byo/openshift-cluster/redeploy-certificates.yml | 6
-rw-r--r--  playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml | 10
-rw-r--r--  playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml | 4
-rw-r--r--  playbooks/byo/openshift-cluster/redeploy-master-certificates.yml | 4
-rw-r--r--  playbooks/byo/openshift-cluster/redeploy-node-certificates.yml | 4
-rw-r--r--  playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml | 6
-rw-r--r--  playbooks/byo/openshift-cluster/redeploy-registry-certificates.yml | 4
-rw-r--r--  playbooks/byo/openshift-cluster/redeploy-router-certificates.yml | 4
-rw-r--r--  playbooks/byo/openshift-cluster/service-catalog.yml | 18
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/README.md | 5
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml | 62
l---------  playbooks/byo/openshift-cluster/upgrades/docker/nuke_images.sh | 1
l---------  playbooks/byo/openshift-cluster/upgrades/docker/roles | 1
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml | 36
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml | 24
l---------  playbooks/byo/openshift-cluster/upgrades/v3_3/roles | 1
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml | 104
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml | 99
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml | 101
l---------  playbooks/byo/openshift-cluster/upgrades/v3_4/roles | 1
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml | 102
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml | 99
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml | 99
l---------  playbooks/byo/openshift-cluster/upgrades/v3_5/roles | 1
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml | 102
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml | 99
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml | 99
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_6/README.md | 20
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml | 7
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml | 16
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml | 9
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_7/README.md | 20
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml | 7
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml | 16
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml | 9
-rw-r--r--  playbooks/byo/openshift-etcd/config.yml | 6
-rw-r--r--  playbooks/byo/openshift-etcd/migrate.yml | 6
-rw-r--r--  playbooks/byo/openshift-etcd/restart.yml | 6
-rw-r--r--  playbooks/byo/openshift-etcd/scaleup.yml | 6
-rw-r--r--  playbooks/byo/openshift-glusterfs/README.md | 98
-rw-r--r--  playbooks/byo/openshift-glusterfs/config.yml | 10
l---------  playbooks/byo/openshift-glusterfs/filter_plugins | 1
l---------  playbooks/byo/openshift-glusterfs/lookup_plugins | 1
-rw-r--r--  playbooks/byo/openshift-glusterfs/registry.yml | 10
l---------  playbooks/byo/openshift-glusterfs/roles (renamed from playbooks/byo/openshift-preflight/roles) | 0
-rw-r--r--  playbooks/byo/openshift-master/config.yml | 6
-rw-r--r--  playbooks/byo/openshift-master/restart.yml | 6
-rw-r--r--  playbooks/byo/openshift-master/scaleup.yml | 29
-rw-r--r--  playbooks/byo/openshift-node/config.yml | 6
-rw-r--r--  playbooks/byo/openshift-node/network_manager.yml | 42
-rw-r--r--  playbooks/byo/openshift-node/restart.yml | 6
-rw-r--r--  playbooks/byo/openshift-node/scaleup.yml | 35
-rw-r--r--  playbooks/byo/openshift-preflight/README.md | 43
-rw-r--r--  playbooks/byo/openshift-preflight/check.yml | 13
-rw-r--r--  playbooks/byo/openshift_facts.yml | 5
-rw-r--r--  playbooks/byo/rhel_subscribe.yml | 7
-rw-r--r--  playbooks/byo/vagrant.yml | 4
78 files changed, 698 insertions, 1212 deletions
diff --git a/playbooks/byo/openshift-cfme/config.yml b/playbooks/byo/openshift-cfme/config.yml
new file mode 100644
index 000000000..0e8e7a94d
--- /dev/null
+++ b/playbooks/byo/openshift-cfme/config.yml
@@ -0,0 +1,8 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+ tags:
+ - always
+
+- include: ../../common/openshift-cluster/evaluate_groups.yml
+
+- include: ../../common/openshift-cfme/config.yml
diff --git a/playbooks/byo/openshift-cfme/uninstall.yml b/playbooks/byo/openshift-cfme/uninstall.yml
new file mode 100644
index 000000000..c8ed16859
--- /dev/null
+++ b/playbooks/byo/openshift-cfme/uninstall.yml
@@ -0,0 +1,6 @@
+---
+# - include: ../openshift-cluster/initialize_groups.yml
+# tags:
+# - always
+
+- include: ../../common/openshift-cfme/uninstall.yml
diff --git a/playbooks/byo/openshift-checks/README.md b/playbooks/byo/openshift-checks/README.md
new file mode 100644
index 000000000..b26e7d7ed
--- /dev/null
+++ b/playbooks/byo/openshift-checks/README.md
@@ -0,0 +1,104 @@
+# OpenShift health checks
+
+This directory contains Ansible playbooks for detecting potential problems prior
+to an install, as well as health checks to run on existing OpenShift clusters.
+
+Ansible's default operation mode is to fail fast, on the first error. However,
+when performing checks, it is useful to gather as much information about
+problems as possible in a single run.
+
+Thus, the playbooks run a battery of checks against the inventory hosts and
+gather intermediate errors, giving a more complete diagnostic of the state of
+each host. If any check fails, the playbook run will be marked as failed.
+
+To facilitate understanding the problems that were encountered, a custom
+callback plugin summarizes execution errors at the end of a playbook run.
+
+## Available playbooks
+
+1. Pre-install playbook ([pre-install.yml](pre-install.yml)) - verifies system
+ requirements and looks for common problems that can prevent a successful
+ installation of a production cluster.
+
+2. Diagnostic playbook ([health.yml](health.yml)) - checks an existing cluster
+ for known signs of problems.
+
+3. Certificate expiry playbooks ([certificate_expiry](certificate_expiry)) -
+ check that certificates in use are valid and not expiring soon.
+
+4. Adhoc playbook ([adhoc.yml](adhoc.yml)) - use it to run adhoc checks or to
+ list existing checks.
+ See the [next section](#the-adhoc-playbook) for a usage example.
+
+## Running
+
+With a [recent installation of Ansible](../../../README.md#setup), run the playbook
+against your inventory file. Here are the steps:
+
+1. If you haven't done it yet, clone this repository:
+
+ ```console
+ $ git clone https://github.com/openshift/openshift-ansible
+ $ cd openshift-ansible
+ ```
+
+2. Install the [dependencies](../../../README.md#setup)
+
+3. Run the appropriate playbook:
+
+ ```console
+ $ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/pre-install.yml
+ ```
+
+ or
+
+ ```console
+ $ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/health.yml
+ ```
+
+ or
+
+ ```console
+ $ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/certificate_expiry/default.yaml -v
+ ```
+
+### The adhoc playbook
+
+The adhoc playbook gives you the flexibility to run any check or a custom group of
+checks. What will be run is determined by the `openshift_checks` variable,
+which, among other ways supported by Ansible, can be set on the command line
+using the `-e` flag.
+
+For example, to run the `docker_storage` check:
+
+```console
+$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/adhoc.yml -e openshift_checks=docker_storage
+```
+
+To run more checks, use a comma-separated list of check names:
+
+```console
+$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/adhoc.yml -e openshift_checks=docker_storage,disk_availability
+```
+
+To run an entire class of checks, use the name of a check group tag prefixed by `@`. For example, this runs all checks tagged `preflight`:
+
+```console
+$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/adhoc.yml -e openshift_checks=@preflight
+```
+
+It is valid to specify multiple check tags and individual check names together
+in a comma-separated list.
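+
+For example, this runs every check tagged `preflight` plus the `docker_storage`
+check:
+
+```console
+$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/adhoc.yml -e openshift_checks=@preflight,docker_storage
+```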
+
+To list all of the available checks and tags, run the adhoc playbook without
+setting the `openshift_checks` variable:
+
+```console
+$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/adhoc.yml
+```
+
+## Running in a container
+
+This repository is built into a Docker image that includes Ansible, so it can
+be run anywhere Docker is available, without the need to manually install dependencies.
+Instructions for doing so may be found [in the README](../../../README_CONTAINER_IMAGE.md).
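+
+As a rough illustration only (the image name, mount points, and environment
+variables below are assumptions; the container README linked above is
+authoritative), a containerized health check run might look like:
+
+```console
+$ docker run -u `id -u` \
+      -v $HOME/.ssh/id_rsa:/opt/app-root/src/.ssh/id_rsa:Z \
+      -v /path/to/inventory:/tmp/inventory:Z \
+      -e INVENTORY_FILE=/tmp/inventory \
+      -e PLAYBOOK_FILE=playbooks/byo/openshift-checks/health.yml \
+      openshift/origin-ansible
+```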
diff --git a/playbooks/byo/openshift-checks/adhoc.yml b/playbooks/byo/openshift-checks/adhoc.yml
new file mode 100644
index 000000000..226bed732
--- /dev/null
+++ b/playbooks/byo/openshift-checks/adhoc.yml
@@ -0,0 +1,27 @@
+---
+# NOTE: ideally this would be just part of a single play in
+# common/openshift-checks/adhoc.yml that lists the existing checks when
+# openshift_checks is not set, or runs the requested checks. However, actually
+# running the checks requires the included dependencies to run first, and that
+# takes time. To speed up listing checks, we use this separate play that runs
+# before the dependencies are included, saving time and improving the UX.
+- name: OpenShift health checks
+ # NOTE: though the openshift_checks variable could potentially be defined on
+ # individual hosts while not defined for localhost, we do not support that
+ # usage. Running this play only on localhost speeds up execution.
+ hosts: localhost
+ connection: local
+ roles:
+ - openshift_health_checker
+ vars:
+ - r_openshift_health_checker_playbook_context: adhoc
+ pre_tasks:
+ - name: List known health checks
+ action: openshift_health_check
+ when: openshift_checks is undefined or not openshift_checks
+
+- include: ../openshift-cluster/initialize_groups.yml
+
+- include: ../../common/openshift-cluster/std_include.yml
+
+- include: ../../common/openshift-checks/adhoc.yml
diff --git a/playbooks/byo/openshift-checks/certificate_expiry/default.yaml b/playbooks/byo/openshift-checks/certificate_expiry/default.yaml
new file mode 100644
index 000000000..630135cae
--- /dev/null
+++ b/playbooks/byo/openshift-checks/certificate_expiry/default.yaml
@@ -0,0 +1,10 @@
+---
+# Default behavior; you will need to run ansible with the
+# -v option to see report results:
+
+- name: Check cert expirys
+ hosts: nodes:masters:etcd
+ become: yes
+ gather_facts: no
+ roles:
+ - role: openshift_certificate_expiry
diff --git a/playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml b/playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml
new file mode 100644
index 000000000..378d1f154
--- /dev/null
+++ b/playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml
@@ -0,0 +1,40 @@
+# This example generates HTML and JSON reports.
+#
+# Copies of the generated reports are uploaded to the masters,
+# which is particularly useful when this playbook is run from a container.
+#
+# All certificates (healthy or not) are included in the results
+#
+# Optional environment variables to alter the behaviour of the playbook:
+# CERT_EXPIRY_WARN_DAYS: Length of the warning window in days (45)
+# COPY_TO_PATH: path on the masters to copy the reports to (/etc/origin/certificate_expiration_report)
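+#
+# Illustrative invocation overriding both (inventory path is a placeholder):
+#   CERT_EXPIRY_WARN_DAYS=30 COPY_TO_PATH=/tmp/cert-reports \
+#     ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml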
+---
+- name: Generate certificate expiration reports
+ hosts: nodes:masters:etcd
+ gather_facts: no
+ vars:
+ openshift_certificate_expiry_save_json_results: yes
+ openshift_certificate_expiry_generate_html_report: yes
+ openshift_certificate_expiry_show_all: yes
+ openshift_certificate_expiry_warning_days: "{{ lookup('env', 'CERT_EXPIRY_WARN_DAYS') | default('45', true) }}"
+ roles:
+ - role: openshift_certificate_expiry
+
+- name: Upload reports to master
+ hosts: masters
+ gather_facts: no
+ vars:
+ destination_path: "{{ lookup('env', 'COPY_TO_PATH') | default('/etc/origin/certificate_expiration_report', true) }}"
+ timestamp: "{{ lookup('pipe', 'date +%Y%m%d') }}"
+ tasks:
+ - name: Ensure that the target directory exists
+ file:
+ path: "{{ destination_path }}"
+ state: directory
+ - name: Copy the reports
+ copy:
+ dest: "{{ destination_path }}/{{ timestamp }}-{{ item }}"
+ src: "/tmp/{{ item }}"
+ with_items:
+ - "cert-expiry-report.html"
+ - "cert-expiry-report.json"
diff --git a/playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml b/playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml
new file mode 100644
index 000000000..ae41c7c14
--- /dev/null
+++ b/playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml
@@ -0,0 +1,18 @@
+---
+# This example playbook is useful if you just want to try the
+# role out.
+#
+# This example enables HTML and JSON reports
+#
+# All certificates (healthy or not) are included in the results
+
+- name: Check cert expirys
+ hosts: nodes:masters:etcd
+ become: yes
+ gather_facts: no
+ vars:
+ openshift_certificate_expiry_save_json_results: yes
+ openshift_certificate_expiry_generate_html_report: yes
+ openshift_certificate_expiry_show_all: yes
+ roles:
+ - role: openshift_certificate_expiry
diff --git a/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml b/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml
new file mode 100644
index 000000000..d80cb6ff4
--- /dev/null
+++ b/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml
@@ -0,0 +1,12 @@
+---
+# Generate HTML and JSON artifacts in their default paths:
+
+- name: Check cert expirys
+ hosts: nodes:masters:etcd
+ become: yes
+ gather_facts: no
+ vars:
+ openshift_certificate_expiry_generate_html_report: yes
+ openshift_certificate_expiry_save_json_results: yes
+ roles:
+ - role: openshift_certificate_expiry
diff --git a/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml b/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml
new file mode 100644
index 000000000..2189455b7
--- /dev/null
+++ b/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml
@@ -0,0 +1,16 @@
+---
+# Generate timestamped HTML and JSON reports in /var/lib/certcheck
+
+- name: Check cert expirys
+ hosts: nodes:masters:etcd
+ become: yes
+ gather_facts: no
+ vars:
+ openshift_certificate_expiry_generate_html_report: yes
+ openshift_certificate_expiry_save_json_results: yes
+ openshift_certificate_expiry_show_all: yes
+ timestamp: "{{ lookup('pipe', 'date +%Y%m%d') }}"
+ openshift_certificate_expiry_html_report_path: "/var/lib/certcheck/{{ timestamp }}-cert-expiry-report.html"
+ openshift_certificate_expiry_json_results_path: "/var/lib/certcheck/{{ timestamp }}-cert-expiry-report.json"
+ roles:
+ - role: openshift_certificate_expiry
diff --git a/playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml b/playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml
new file mode 100644
index 000000000..87a0f3be4
--- /dev/null
+++ b/playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml
@@ -0,0 +1,13 @@
+---
+# Change the expiration warning window to 1500 days (good for testing
+# the module out) and save the results as a JSON file:
+
+- name: Check cert expirys
+ hosts: nodes:masters:etcd
+ become: yes
+ gather_facts: no
+ vars:
+ openshift_certificate_expiry_warning_days: 1500
+ openshift_certificate_expiry_save_json_results: yes
+ roles:
+ - role: openshift_certificate_expiry
diff --git a/playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml b/playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml
new file mode 100644
index 000000000..960457c4b
--- /dev/null
+++ b/playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml
@@ -0,0 +1,12 @@
+---
+# Change the expiration warning window to 1500 days (good for testing
+# the module out):
+
+- name: Check cert expirys
+ hosts: nodes:masters:etcd
+ become: yes
+ gather_facts: no
+ vars:
+ openshift_certificate_expiry_warning_days: 1500
+ roles:
+ - role: openshift_certificate_expiry
diff --git a/playbooks/byo/openshift-checks/certificate_expiry/roles b/playbooks/byo/openshift-checks/certificate_expiry/roles
new file mode 120000
index 000000000..4bdbcbad3
--- /dev/null
+++ b/playbooks/byo/openshift-checks/certificate_expiry/roles
@@ -0,0 +1 @@
+../../../../roles
\ No newline at end of file
diff --git a/playbooks/byo/openshift-checks/health.yml b/playbooks/byo/openshift-checks/health.yml
new file mode 100644
index 000000000..96a71e4dc
--- /dev/null
+++ b/playbooks/byo/openshift-checks/health.yml
@@ -0,0 +1,6 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+
+- include: ../../common/openshift-cluster/std_include.yml
+
+- include: ../../common/openshift-checks/health.yml
diff --git a/playbooks/byo/openshift-checks/pre-install.yml b/playbooks/byo/openshift-checks/pre-install.yml
new file mode 100644
index 000000000..dd93df0bb
--- /dev/null
+++ b/playbooks/byo/openshift-checks/pre-install.yml
@@ -0,0 +1,6 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+
+- include: ../../common/openshift-cluster/std_include.yml
+
+- include: ../../common/openshift-checks/pre-install.yml
diff --git a/playbooks/byo/openshift-cluster/cluster_hosts.yml b/playbooks/byo/openshift-cluster/cluster_hosts.yml
index cb464cf0d..e807ac004 100644
--- a/playbooks/byo/openshift-cluster/cluster_hosts.yml
+++ b/playbooks/byo/openshift-cluster/cluster_hosts.yml
@@ -1,6 +1,8 @@
---
g_etcd_hosts: "{{ groups.etcd | default([]) }}"
+g_new_etcd_hosts: "{{ groups.new_etcd | default([]) }}"
+
g_lb_hosts: "{{ groups.lb | default([]) }}"
g_master_hosts: "{{ groups.masters | default([]) }}"
@@ -13,7 +15,12 @@ g_new_node_hosts: "{{ groups.new_nodes | default([]) }}"
g_nfs_hosts: "{{ groups.nfs | default([]) }}"
+g_glusterfs_hosts: "{{ groups.glusterfs | default([]) }}"
+
+g_glusterfs_registry_hosts: "{{ groups.glusterfs_registry | default(g_glusterfs_hosts) }}"
+
g_all_hosts: "{{ g_master_hosts | union(g_node_hosts) | union(g_etcd_hosts)
- | union(g_lb_hosts) | union(g_nfs_hosts)
+ | union(g_new_etcd_hosts) | union(g_lb_hosts) | union(g_nfs_hosts)
| union(g_new_node_hosts)| union(g_new_master_hosts)
+ | union(g_glusterfs_hosts) | union(g_glusterfs_registry_hosts)
| default([]) }}"
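The new `g_new_etcd_hosts`, `g_glusterfs_hosts`, and `g_glusterfs_registry_hosts`
variables above map to inventory groups of the same names. A minimal, illustrative
inventory fragment, assuming the standard BYO `[OSEv3:children]` layout with
placeholder host names (host-level variables are omitted; see
inventory/byo/hosts.*.example for the supported format):

```ini
[OSEv3:children]
masters
nodes
etcd
new_etcd
glusterfs
glusterfs_registry

[new_etcd]
etcd3.example.com

[glusterfs]
storage1.example.com
storage2.example.com
storage3.example.com

[glusterfs_registry]
storage1.example.com
```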
diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml
index 86eff4ca4..acf5469bf 100644
--- a/playbooks/byo/openshift-cluster/config.yml
+++ b/playbooks/byo/openshift-cluster/config.yml
@@ -1,4 +1,8 @@
---
+- include: initialize_groups.yml
+ tags:
+ - always
+
- include: ../../common/openshift-cluster/std_include.yml
tags:
- always
@@ -7,5 +11,4 @@
vars:
openshift_cluster_id: "{{ cluster_id | default('default') }}"
openshift_debug_level: "{{ debug_level | default(2) }}"
- openshift_deployment_type: "{{ deployment_type }}"
openshift_deployment_subtype: "{{ deployment_subtype | default(none) }}"
diff --git a/playbooks/byo/openshift-cluster/enable_dnsmasq.yml b/playbooks/byo/openshift-cluster/enable_dnsmasq.yml
index 32f9ebfd3..9ce8f0d3c 100644
--- a/playbooks/byo/openshift-cluster/enable_dnsmasq.yml
+++ b/playbooks/byo/openshift-cluster/enable_dnsmasq.yml
@@ -1,26 +1,4 @@
---
-- name: Create initial host groups for localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
- - name: Evaluate group l_oo_all_hosts
- add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts | default([]) }}"
- changed_when: False
-
-- name: Create initial host groups for all hosts
- hosts: l_oo_all_hosts
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
+- include: initialize_groups.yml
- include: ../../common/openshift-cluster/enable_dnsmasq.yml
diff --git a/playbooks/byo/openshift-cluster/initialize_groups.yml b/playbooks/byo/openshift-cluster/initialize_groups.yml
new file mode 100644
index 000000000..2a725510a
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/initialize_groups.yml
@@ -0,0 +1,10 @@
+---
+- name: Create initial host groups for localhost
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tags:
+ - always
+ tasks:
+ - include_vars: cluster_hosts.yml
diff --git a/playbooks/byo/openshift-cluster/openshift-logging.yml b/playbooks/byo/openshift-cluster/openshift-logging.yml
index eebfcd20d..bbec3a4c2 100644
--- a/playbooks/byo/openshift-cluster/openshift-logging.yml
+++ b/playbooks/byo/openshift-cluster/openshift-logging.yml
@@ -4,32 +4,15 @@
# Hosted logging on. See inventory/byo/hosts.*.example for the
# currently supported method.
#
-- name: Create initial host groups for localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
+- include: initialize_groups.yml
tags:
- always
- tasks:
- - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
- - name: Evaluate group l_oo_all_hosts
- add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts | default([]) }}"
- changed_when: False
-- name: Create initial host groups for all hosts
- hosts: l_oo_all_hosts
- gather_facts: no
+- include: ../../common/openshift-cluster/std_include.yml
tags:
- always
- tasks:
- - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
- include: ../../common/openshift-cluster/openshift_logging.yml
vars:
openshift_cluster_id: "{{ cluster_id | default('default') }}"
openshift_debug_level: "{{ debug_level | default(2) }}"
- openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/byo/openshift-cluster/openshift-metrics.yml b/playbooks/byo/openshift-cluster/openshift-metrics.yml
new file mode 100644
index 000000000..1135c8c11
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/openshift-metrics.yml
@@ -0,0 +1,10 @@
+---
+- include: initialize_groups.yml
+ tags:
+ - always
+
+- include: ../../common/openshift-cluster/std_include.yml
+ tags:
+ - always
+
+- include: ../../common/openshift-cluster/openshift_metrics.yml
diff --git a/playbooks/byo/openshift-cluster/openshift-provisioners.yml b/playbooks/byo/openshift-cluster/openshift-provisioners.yml
new file mode 100644
index 000000000..8e80f158b
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/openshift-provisioners.yml
@@ -0,0 +1,6 @@
+---
+- include: initialize_groups.yml
+
+- include: ../../common/openshift-cluster/std_include.yml
+
+- include: ../../common/openshift-cluster/openshift_provisioners.yml
diff --git a/playbooks/byo/openshift-cluster/redeploy-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-certificates.yml
index ad24b9ad0..a3894e243 100644
--- a/playbooks/byo/openshift-cluster/redeploy-certificates.yml
+++ b/playbooks/byo/openshift-cluster/redeploy-certificates.yml
@@ -1,4 +1,8 @@
---
+- include: initialize_groups.yml
+ tags:
+ - always
+
- include: ../../common/openshift-cluster/std_include.yml
tags:
- always
@@ -16,5 +20,7 @@
- include: ../../common/openshift-node/restart.yml
- include: ../../common/openshift-cluster/redeploy-certificates/router.yml
+ when: openshift_hosted_manage_router | default(true) | bool
- include: ../../common/openshift-cluster/redeploy-certificates/registry.yml
+ when: openshift_hosted_manage_registry | default(true) | bool
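With the conditions added above, the router and registry certificate redeploys run
only when `openshift_hosted_manage_router` and `openshift_hosted_manage_registry`
(both default `true`) are enabled. For example, a cluster whose router and registry
are not managed by the installer can skip both steps (inventory path is a placeholder):

```console
$ ansible-playbook -i <inventory file> playbooks/byo/openshift-cluster/redeploy-certificates.yml \
      -e openshift_hosted_manage_router=false -e openshift_hosted_manage_registry=false
```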
diff --git a/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml b/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml
new file mode 100644
index 000000000..29f821eda
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml
@@ -0,0 +1,10 @@
+---
+- include: initialize_groups.yml
+ tags:
+ - always
+
+- include: ../../common/openshift-cluster/std_include.yml
+ tags:
+ - always
+
+- include: ../../common/openshift-cluster/redeploy-certificates/etcd-ca.yml
diff --git a/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml
index ee49364fa..8516baee8 100644
--- a/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml
+++ b/playbooks/byo/openshift-cluster/redeploy-etcd-certificates.yml
@@ -1,4 +1,8 @@
---
+- include: initialize_groups.yml
+ tags:
+ - always
+
- include: ../../common/openshift-cluster/std_include.yml
tags:
- always
diff --git a/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml
index 9c8248c4e..566e8b261 100644
--- a/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml
+++ b/playbooks/byo/openshift-cluster/redeploy-master-certificates.yml
@@ -1,4 +1,8 @@
---
+- include: initialize_groups.yml
+ tags:
+ - always
+
- include: ../../common/openshift-cluster/std_include.yml
tags:
- always
diff --git a/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml
index 1695111d0..42777e5e6 100644
--- a/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml
+++ b/playbooks/byo/openshift-cluster/redeploy-node-certificates.yml
@@ -1,4 +1,8 @@
---
+- include: initialize_groups.yml
+ tags:
+ - always
+
- include: ../../common/openshift-cluster/std_include.yml
tags:
- always
diff --git a/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml b/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml
index e44e95467..6e11a111b 100644
--- a/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml
+++ b/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml
@@ -1,6 +1,10 @@
---
+- include: initialize_groups.yml
+ tags:
+ - always
+
- include: ../../common/openshift-cluster/std_include.yml
tags:
- always
-- include: ../../common/openshift-cluster/redeploy-certificates/ca.yml
+- include: ../../common/openshift-cluster/redeploy-certificates/openshift-ca.yml
diff --git a/playbooks/byo/openshift-cluster/redeploy-registry-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-registry-certificates.yml
index 53ee68db9..30feabab3 100644
--- a/playbooks/byo/openshift-cluster/redeploy-registry-certificates.yml
+++ b/playbooks/byo/openshift-cluster/redeploy-registry-certificates.yml
@@ -1,4 +1,8 @@
---
+- include: initialize_groups.yml
+ tags:
+ - always
+
- include: ../../common/openshift-cluster/std_include.yml
tags:
- always
diff --git a/playbooks/byo/openshift-cluster/redeploy-router-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-router-certificates.yml
index f8c267569..2630fb234 100644
--- a/playbooks/byo/openshift-cluster/redeploy-router-certificates.yml
+++ b/playbooks/byo/openshift-cluster/redeploy-router-certificates.yml
@@ -1,4 +1,8 @@
---
+- include: initialize_groups.yml
+ tags:
+ - always
+
- include: ../../common/openshift-cluster/std_include.yml
tags:
- always
diff --git a/playbooks/byo/openshift-cluster/service-catalog.yml b/playbooks/byo/openshift-cluster/service-catalog.yml
new file mode 100644
index 000000000..6f95b4e2d
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/service-catalog.yml
@@ -0,0 +1,18 @@
+---
+#
+# This playbook is a preview of upcoming changes for installing
+# the service catalog. See inventory/byo/hosts.*.example for the
+# currently supported method.
+#
+- include: initialize_groups.yml
+ tags:
+ - always
+
+- include: ../../common/openshift-cluster/std_include.yml
+ tags:
+ - always
+
+- include: ../../common/openshift-cluster/service_catalog.yml
+ vars:
+ openshift_cluster_id: "{{ cluster_id | default('default') }}"
+ openshift_debug_level: "{{ debug_level | default(2) }}"
diff --git a/playbooks/byo/openshift-cluster/upgrades/README.md b/playbooks/byo/openshift-cluster/upgrades/README.md
index 0425ba518..0f64f40f3 100644
--- a/playbooks/byo/openshift-cluster/upgrades/README.md
+++ b/playbooks/byo/openshift-cluster/upgrades/README.md
@@ -4,5 +4,6 @@ cluster. Additional notes for the associated upgrade playbooks are
provided in their respective directories.
# Upgrades available
-- [OpenShift Enterprise 3.4 to 3.5](v3_5/README.md) (works also to upgrade OpenShift origin from 1.4.x to 1.5.x)
-- [OpenShift Enterprise 3.3 to 3.4](v3_4/README.md) (works also to upgrade OpenShift origin from 1.3.x to 1.4.x)
+- [OpenShift Container Platform 3.5 to 3.6](v3_6/README.md) (works also to upgrade OpenShift Origin from 1.5.x to 3.6.x)
+- [OpenShift Container Platform 3.4 to 3.5](v3_5/README.md) (works also to upgrade OpenShift Origin from 1.4.x to 1.5.x)
+- [OpenShift Container Platform 3.3 to 3.4](v3_4/README.md) (works also to upgrade OpenShift Origin from 1.3.x to 1.4.x)
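+
+For example, the full 3.6 upgrade listed above can be run against your existing
+BYO inventory (the inventory path is a placeholder):
+
+```console
+$ ansible-playbook -i <inventory file> playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
+```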
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
deleted file mode 100644
index 4ee6afe2a..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/docker/docker_upgrade.yml
+++ /dev/null
@@ -1,62 +0,0 @@
----
-- name: Check for appropriate Docker versions
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- roles:
- - openshift_facts
- tasks:
- - set_fact:
- repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}"
-
- - fail:
- msg: Cannot upgrade Docker on Atomic operating systems.
- when: openshift.common.is_atomic | bool
-
- - include: ../../../../common/openshift-cluster/upgrades/docker/upgrade_check.yml
- when: docker_upgrade is not defined or docker_upgrade | bool
-
-
-# If a node fails, halt everything, the admin will need to clean up and we
-# don't want to carry on, potentially taking out every node. The playbook can safely be re-run
-# and will not take any action on a node already running the requested docker version.
-- name: Drain and upgrade nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- serial: 1
- any_errors_fatal: true
-
- roles:
- - lib_openshift
-
- tasks:
- - name: Mark node unschedulable
- oadm_manage_node:
- node: "{{ openshift.node.nodename | lower }}"
- schedulable: False
- delegate_to: "{{ groups.oo_first_master.0 }}"
- retries: 10
- delay: 5
- register: node_unschedulable
- until: node_unschedulable|succeeded
- when:
- - l_docker_upgrade is defined
- - l_docker_upgrade | bool
- - inventory_hostname in groups.oo_nodes_to_upgrade
-
- - name: Drain Node for Kubelet upgrade
- command: >
- {{ openshift.common.admin_binary }} drain {{ openshift.node.nodename }} --force --delete-local-data --ignore-daemonsets
- delegate_to: "{{ groups.oo_first_master.0 }}"
- when: l_docker_upgrade is defined and l_docker_upgrade | bool and inventory_hostname in groups.oo_nodes_to_upgrade
-
- - include: ../../../../common/openshift-cluster/upgrades/docker/upgrade.yml
- when: l_docker_upgrade is defined and l_docker_upgrade | bool
-
- - name: Set node schedulability
- oadm_manage_node:
- node: "{{ openshift.node.nodename | lower }}"
- schedulable: True
- delegate_to: "{{ groups.oo_first_master.0 }}"
- retries: 10
- delay: 5
- register: node_schedulable
- until: node_schedulable|succeeded
- when: node_unschedulable|changed
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/nuke_images.sh b/playbooks/byo/openshift-cluster/upgrades/docker/nuke_images.sh
deleted file mode 120000
index d5d864b63..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/docker/nuke_images.sh
+++ /dev/null
@@ -1 +0,0 @@
-../../../../common/openshift-cluster/upgrades/files/nuke_images.sh
\ No newline at end of file
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/roles b/playbooks/byo/openshift-cluster/upgrades/docker/roles
deleted file mode 120000
index 6bc1a7aef..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/docker/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles
\ No newline at end of file
diff --git a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
index d5fd7c424..7f31e26e1 100644
--- a/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/docker/upgrade.yml
@@ -1,37 +1,5 @@
---
# Playbook to upgrade Docker to the max allowable version for an OpenShift cluster.
-- name: Create initial host groups for localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../cluster_hosts.yml
- - name: Evaluate group l_oo_all_hosts
- add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts | default([]) }}"
- changed_when: False
+- include: ../../initialize_groups.yml
-- name: Create initial host groups for all hosts
- hosts: l_oo_all_hosts
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../cluster_hosts.yml
-
-- include: ../../../../common/openshift-cluster/evaluate_groups.yml
- vars:
- # Do not allow adding hosts during upgrade.
- g_new_master_hosts: []
- g_new_node_hosts: []
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- openshift_deployment_type: "{{ deployment_type }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
-
-- include: docker_upgrade.yml
+- include: ../../../../common/openshift-cluster/upgrades/docker/docker_upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml b/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml
index 106dcc12d..5bd5d64ab 100644
--- a/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/upgrade_etcd.yml
@@ -1,26 +1,6 @@
---
-- name: Create initial host groups for localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../cluster_hosts.yml
- - name: Evaluate group l_oo_all_hosts
- add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts | default([]) }}"
- changed_when: False
+- include: ../initialize_groups.yml
-- name: Create initial host groups for all hosts
- hosts: l_oo_all_hosts
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../cluster_hosts.yml
+- include: ../../../common/openshift-cluster/evaluate_groups.yml
- include: ../../../common/openshift-cluster/upgrades/etcd/main.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/roles b/playbooks/byo/openshift-cluster/upgrades/v3_3/roles
deleted file mode 120000
index 6bc1a7aef..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles
\ No newline at end of file
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
index b1510e062..697a18c4d 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade.yml
@@ -2,106 +2,6 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
+- include: ../../initialize_groups.yml
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
- openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
-
-# Pre-upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/disable_excluder.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_3/master_config_upgrade.yml"
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
- vars:
- node_config_hook: "v3_3/node_config_upgrade.yml"
-
-- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
index b61d9e58a..4d284c279 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
@@ -11,101 +11,6 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
+- include: ../../initialize_groups.yml
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
- openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
-
-# Pre-upgrade
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/disable_excluder.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_3/master_config_upgrade.yml"
-
-- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
index f0b2a2c75..180a2821f 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
@@ -4,103 +4,6 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
+- include: ../../initialize_groups.yml
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
- openshift_upgrade_min: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
-
-# Pre-upgrade
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/disable_excluder.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
- tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
- vars:
- node_config_hook: "v3_3/node_config_upgrade.yml"
+- include: ../../../../common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/roles b/playbooks/byo/openshift-cluster/upgrades/v3_4/roles
deleted file mode 120000
index 6bc1a7aef..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles
\ No newline at end of file
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
index 82a1d0935..8cce91b3f 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade.yml
@@ -2,104 +2,6 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
+- include: ../../initialize_groups.yml
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
- openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
-
-# Pre-upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/disable_excluder.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_4/master_config_upgrade.yml"
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
index 7ae1b3e6e..8e5d0f5f9 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
@@ -11,101 +11,6 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
+- include: ../../initialize_groups.yml
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
- openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
-
-# Pre-upgrade
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/disable_excluder.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
- vars:
- master_config_hook: "v3_4/master_config_upgrade.yml"
-
-- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
index ec63ea60e..d5329b858 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
@@ -4,101 +4,6 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
+- include: ../../initialize_groups.yml
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
- openshift_upgrade_min: "{{ '1.3' if deployment_type == 'origin' else '3.3' }}"
-
-# Pre-upgrade
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/disable_excluder.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
- tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/roles b/playbooks/byo/openshift-cluster/upgrades/v3_5/roles
deleted file mode 120000
index 6bc1a7aef..000000000
--- a/playbooks/byo/openshift-cluster/upgrades/v3_5/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../../../roles \ No newline at end of file
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml
index 69cabcd33..f44d55ad2 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade.yml
@@ -2,104 +2,6 @@
#
# Full Control Plane + Nodes Upgrade
#
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
+- include: ../../initialize_groups.yml
-- name: Configure the upgrade target for the common upgrade tasks
- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
- openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
-
-# Pre-upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos and initialize facts on all hosts
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/disable_excluder.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
index 719057d2b..2377713fa 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
@@ -11,101 +11,6 @@
#
# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
#
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
+- include: ../../initialize_groups.yml
-# Configure the upgrade target for the common upgrade tasks:
-- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
- openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
-
-# Pre-upgrade
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on control plane hosts
- hosts: oo_masters_to_config:oo_etcd_to_config:oo_lb_to_config
- tags:
- - pre_upgrade
- roles:
- - openshift_repos
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/disable_excluder.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-master/validate_restart.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_masters_to_config:oo_etcd_to_config
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_control_plane.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/post_control_plane.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
index 259be6f8e..5b3f6ab06 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
@@ -4,101 +4,6 @@
#
# Upgrades nodes only, but requires the control plane to have already been upgraded.
#
-- include: ../../../../common/openshift-cluster/upgrades/init.yml
- tags:
- - pre_upgrade
+- include: ../../initialize_groups.yml
-# Configure the upgrade target for the common upgrade tasks:
-- hosts: l_oo_all_hosts
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_upgrade_target: "{{ '1.5' if deployment_type == 'origin' else '3.5' }}"
- openshift_upgrade_min: "{{ '1.4' if deployment_type == 'origin' else '3.4' }}"
-
-# Pre-upgrade
-- include: ../../../../common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
- tags:
- - pre_upgrade
-
-- name: Update repos on nodes
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config:oo_lb_to_config
- roles:
- - openshift_repos
- tags:
- - pre_upgrade
-
-- name: Set openshift_no_proxy_internal_hostnames
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
- tags:
- - pre_upgrade
- tasks:
- - set_fact:
- openshift_no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_upgrade']
- | union(groups['oo_masters_to_config'])
- | union(groups['oo_etcd_to_config'] | default([])))
- | oo_collect('openshift.common.hostname') | default([]) | join (',')
- }}"
- when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
- openshift_generate_no_proxy_hosts | default(True) | bool }}"
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_inventory_vars.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/disable_excluder.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/initialize_openshift_version.yml
- tags:
- - pre_upgrade
- vars:
- # Request specific openshift_release and let the openshift_version role handle converting this
- # to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
- # defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "{{ openshift_upgrade_target }}"
- openshift_protect_installed_version: False
-
- # We skip the docker role at this point in upgrade to prevent
- # unintended package, container, or config upgrades which trigger
- # docker restarts. At this early stage of upgrade we can assume
- # docker is configured and running.
- skip_docker_role: True
-
-- name: Verify masters are already upgraded
- hosts: oo_masters_to_config
- tags:
- - pre_upgrade
- tasks:
- - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
- when: openshift.common.version != openshift_version
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
- tags:
- - pre_upgrade
-
-- include: ../../../../common/openshift-cluster/upgrades/pre/gate_checks.yml
- tags:
- - pre_upgrade
-
-# Pre-upgrade completed, nothing after this should be tagged pre_upgrade.
-
-# Separate step so we can execute in parallel and clear out anything unused
-# before we get into the serialized upgrade process which will then remove
-# remaining images if possible.
-- name: Cleanup unused Docker images
- hosts: oo_nodes_to_upgrade
- tasks:
- - include: ../../../../common/openshift-cluster/upgrades/cleanup_unused_images.yml
-
-- include: ../../../../common/openshift-cluster/upgrades/upgrade_nodes.yml
+- include: ../../../../common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_6/README.md
new file mode 100644
index 000000000..797af671a
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/README.md
@@ -0,0 +1,20 @@
+# v3.6 Major and Minor Upgrade Playbook
+
+## Overview
+This playbook currently performs the following steps.
+
+ * Upgrade and restart master services
+ * Unschedule node
+ * Upgrade and restart docker
+ * Upgrade and restart node services
+ * Modifies the subset of the configuration necessary
+ * Applies the latest cluster policies
+ * Updates the default router if one exists
+ * Updates the default registry if one exists
+ * Updates image streams and quickstarts
+
+## Usage
+
+```
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
+```
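The control plane and node playbooks introduced further down in this diff can also be run as two separate steps; a sketch of that invocation, reusing the README's example inventory path:

```
ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
```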
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
new file mode 100644
index 000000000..40120b3e8
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -0,0 +1,7 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../../initialize_groups.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
new file mode 100644
index 000000000..408a4c631
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -0,0 +1,16 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../../initialize_groups.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
new file mode 100644
index 000000000..b5f42b804
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -0,0 +1,9 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../../initialize_groups.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/README.md b/playbooks/byo/openshift-cluster/upgrades/v3_7/README.md
new file mode 100644
index 000000000..4bf53be81
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/README.md
@@ -0,0 +1,20 @@
+# v3.7 Major and Minor Upgrade Playbook
+
+## Overview
+This playbook currently performs the following steps.
+
+ * Upgrade and restart master services
+ * Unschedule node
+ * Upgrade and restart docker
+ * Upgrade and restart node services
+ * Modifies the subset of the configuration necessary
+ * Applies the latest cluster policies
+ * Updates the default router if one exists
+ * Updates the default registry if one exists
+ * Updates image streams and quickstarts
+
+## Usage
+
+```
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml
+```
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml
new file mode 100644
index 000000000..e41c29682
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -0,0 +1,7 @@
+---
+#
+# Full Control Plane + Nodes Upgrade
+#
+- include: ../../initialize_groups.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
new file mode 100644
index 000000000..21e0fd815
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -0,0 +1,16 @@
+---
+#
+# Control Plane Upgrade Playbook
+#
+# Upgrades masters and Docker (only on standalone etcd hosts)
+#
+# This upgrade does not include:
+# - node service running on masters
+# - docker running on masters
+# - node service running on dedicated nodes
+#
+# You can run the upgrade_nodes.yml playbook after this to upgrade these components separately.
+#
+- include: ../../initialize_groups.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
new file mode 100644
index 000000000..0e09d996e
--- /dev/null
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
@@ -0,0 +1,9 @@
+---
+#
+# Node Upgrade Playbook
+#
+# Upgrades nodes only, but requires the control plane to have already been upgraded.
+#
+- include: ../../initialize_groups.yml
+
+- include: ../../../../common/openshift-cluster/upgrades/v3_7/upgrade_nodes.yml
diff --git a/playbooks/byo/openshift-etcd/config.yml b/playbooks/byo/openshift-etcd/config.yml
new file mode 100644
index 000000000..1342bd60c
--- /dev/null
+++ b/playbooks/byo/openshift-etcd/config.yml
@@ -0,0 +1,6 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+
+- include: ../../common/openshift-cluster/std_include.yml
+
+- include: ../../common/openshift-etcd/config.yml
diff --git a/playbooks/byo/openshift-etcd/migrate.yml b/playbooks/byo/openshift-etcd/migrate.yml
new file mode 100644
index 000000000..2dec2bef6
--- /dev/null
+++ b/playbooks/byo/openshift-etcd/migrate.yml
@@ -0,0 +1,6 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+
+- include: ../../common/openshift-cluster/std_include.yml
+
+- include: ../../common/openshift-etcd/migrate.yml
diff --git a/playbooks/byo/openshift-etcd/restart.yml b/playbooks/byo/openshift-etcd/restart.yml
index 6713f07e3..034bba4b4 100644
--- a/playbooks/byo/openshift-etcd/restart.yml
+++ b/playbooks/byo/openshift-etcd/restart.yml
@@ -1,8 +1,6 @@
---
+- include: ../openshift-cluster/initialize_groups.yml
+
- include: ../../common/openshift-cluster/std_include.yml
- tags:
- - always
- include: ../../common/openshift-etcd/restart.yml
- vars:
- openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/byo/openshift-etcd/scaleup.yml b/playbooks/byo/openshift-etcd/scaleup.yml
new file mode 100644
index 000000000..a2a5856a9
--- /dev/null
+++ b/playbooks/byo/openshift-etcd/scaleup.yml
@@ -0,0 +1,6 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+
+- include: ../../common/openshift-cluster/std_include.yml
+
+- include: ../../common/openshift-etcd/scaleup.yml
diff --git a/playbooks/byo/openshift-glusterfs/README.md b/playbooks/byo/openshift-glusterfs/README.md
new file mode 100644
index 000000000..f62aea229
--- /dev/null
+++ b/playbooks/byo/openshift-glusterfs/README.md
@@ -0,0 +1,98 @@
+# OpenShift GlusterFS Playbooks
+
+These playbooks are intended to enable the use of GlusterFS volumes by pods in
+OpenShift. While they try to provide a sane set of defaults, they do cover a
+variety of scenarios and configurations, so read carefully. :)
+
+## Playbook: config.yml
+
+This is the main playbook that integrates GlusterFS into a new or existing
+OpenShift cluster. It will also, if specified, configure a hosted Docker
+registry with GlusterFS backend storage.
+
+This playbook requires the `glusterfs` group to exist in the Ansible inventory
+file. The hosts in this group are the nodes of the GlusterFS cluster.
+
+ * If this is a newly configured cluster, each host must have a
+   `glusterfs_devices` variable defined, which must be a list of block
+   storage devices intended for use only by the GlusterFS cluster. If this is
+   also an external GlusterFS cluster, you must specify
+   `openshift_storage_glusterfs_is_native=False`. If the cluster is to be
+   managed by an external heketi service, you must also specify
+   `openshift_storage_glusterfs_heketi_is_native=False` and
+   `openshift_storage_glusterfs_heketi_url=<URL>` with the URL to the heketi
+   service. All these variables are specified in `[OSEv3:vars]`.
+ * If this is an existing cluster, you do not need to specify a list of block
+   devices, but you must specify the following variables in `[OSEv3:vars]`:
+ * `openshift_storage_glusterfs_is_missing=False`
+ * `openshift_storage_glusterfs_heketi_is_missing=False`
+
+By default, pods for a native GlusterFS cluster will be created in the
+`default` namespace. To change this, specify
+`openshift_storage_glusterfs_namespace=<other namespace>` in `[OSEv3:vars]`.
+
+To configure the deployment of a Docker registry with GlusterFS backend
+storage, specify `openshift_hosted_registry_storage_kind=glusterfs` in
+`[OSEv3:vars]`. To create a separate GlusterFS cluster for use only by the
+registry, specify a `glusterfs_registry` group that is populated, as the
+`glusterfs` group is, with the nodes of the separate cluster. If no
+`glusterfs_registry` group is specified, the cluster defined by the `glusterfs`
+group will be used.
+
+To swap an existing hosted registry's backend storage for a GlusterFS volume,
+specify `openshift_hosted_registry_storage_glusterfs_swap=True`. To
+additionally copy any existing contents of the hosted registry,
+specify `openshift_hosted_registry_storage_glusterfs_swapcopy=True`.
+
+**NOTE:** For each namespace that is to have access to GlusterFS volumes, an
+Endpoints resource pointing to the GlusterFS cluster nodes and a corresponding
+Service resource must be created. If dynamic provisioning using StorageClasses
+is configured, these resources are created automatically in the namespaces that
+require them. This playbook also takes care of creating these resources in the
+namespaces used for deployment.
+
+An example of a minimal inventory file:
+```
+[OSEv3:children]
+masters
+nodes
+glusterfs
+
+[OSEv3:vars]
+ansible_ssh_user=root
+deployment_type=origin
+
+[masters]
+master
+
+[nodes]
+node0
+node1
+node2
+
+[glusterfs]
+node0 glusterfs_devices='[ "/dev/sdb" ]'
+node1 glusterfs_devices='[ "/dev/sdb", "/dev/sdc" ]'
+node2 glusterfs_devices='[ "/dev/sdd" ]'
+```
+
+## Playbook: registry.yml
+
+This playbook is intended for admins who want to deploy a hosted Docker
+registry with GlusterFS backend storage on an existing OpenShift cluster. It
+has all the same requirements and behaviors as `config.yml`.
+
+## Role: openshift_storage_glusterfs
+
+The bulk of the work is done by the `openshift_storage_glusterfs` role. This
+role can handle the deployment of GlusterFS (if it is to be hosted on the
+OpenShift cluster), the registration of GlusterFS nodes (hosted or standalone),
+and (if specified) integration as backend storage for a hosted Docker registry.
+
+See the documentation in the role's directory for further details.
+
+## Role: openshift_hosted
+
+The `openshift_hosted` role recognizes `glusterfs` as a possible storage
+backend for a hosted docker registry. It will also, if configured, handle the
+swap of an existing registry's backend storage to a GlusterFS volume.
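The README above names the registry-related variables individually; a sketch of how they might be combined in `[OSEv3:vars]` is shown below. The variable names come from the README, while the values (and the namespace name) are illustrative assumptions only:

```
[OSEv3:vars]
ansible_ssh_user=root
deployment_type=origin
# Back the hosted Docker registry with GlusterFS storage
openshift_hosted_registry_storage_kind=glusterfs
# Swap an existing registry's backend storage and copy its current contents
openshift_hosted_registry_storage_glusterfs_swap=True
openshift_hosted_registry_storage_glusterfs_swapcopy=True
# Create GlusterFS pods in a dedicated namespace instead of `default` (name is an example)
openshift_storage_glusterfs_namespace=glusterfs
```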
diff --git a/playbooks/byo/openshift-glusterfs/config.yml b/playbooks/byo/openshift-glusterfs/config.yml
new file mode 100644
index 000000000..3f11f3991
--- /dev/null
+++ b/playbooks/byo/openshift-glusterfs/config.yml
@@ -0,0 +1,10 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+ tags:
+ - always
+
+- include: ../../common/openshift-cluster/std_include.yml
+ tags:
+ - always
+
+- include: ../../common/openshift-glusterfs/config.yml
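Assuming an inventory laid out as in the README above, the new wrapper would be run directly against it, for example:

```
ansible-playbook -i <inventory file> playbooks/byo/openshift-glusterfs/config.yml
```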
diff --git a/playbooks/byo/openshift-glusterfs/filter_plugins b/playbooks/byo/openshift-glusterfs/filter_plugins
new file mode 120000
index 000000000..99a95e4ca
--- /dev/null
+++ b/playbooks/byo/openshift-glusterfs/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins \ No newline at end of file
diff --git a/playbooks/byo/openshift-glusterfs/lookup_plugins b/playbooks/byo/openshift-glusterfs/lookup_plugins
new file mode 120000
index 000000000..ac79701db
--- /dev/null
+++ b/playbooks/byo/openshift-glusterfs/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins \ No newline at end of file
diff --git a/playbooks/byo/openshift-glusterfs/registry.yml b/playbooks/byo/openshift-glusterfs/registry.yml
new file mode 100644
index 000000000..6ee6febdb
--- /dev/null
+++ b/playbooks/byo/openshift-glusterfs/registry.yml
@@ -0,0 +1,10 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+ tags:
+ - always
+
+- include: ../../common/openshift-cluster/std_include.yml
+ tags:
+ - always
+
+- include: ../../common/openshift-glusterfs/registry.yml
diff --git a/playbooks/byo/openshift-preflight/roles b/playbooks/byo/openshift-glusterfs/roles
index 20c4c58cf..20c4c58cf 120000
--- a/playbooks/byo/openshift-preflight/roles
+++ b/playbooks/byo/openshift-glusterfs/roles
diff --git a/playbooks/byo/openshift-master/config.yml b/playbooks/byo/openshift-master/config.yml
new file mode 100644
index 000000000..98be0c448
--- /dev/null
+++ b/playbooks/byo/openshift-master/config.yml
@@ -0,0 +1,6 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+
+- include: ../../common/openshift-cluster/std_include.yml
+
+- include: ../../common/openshift-master/config.yml
diff --git a/playbooks/byo/openshift-master/restart.yml b/playbooks/byo/openshift-master/restart.yml
index 2d20f69f4..8950efd00 100644
--- a/playbooks/byo/openshift-master/restart.yml
+++ b/playbooks/byo/openshift-master/restart.yml
@@ -1,8 +1,6 @@
---
+- include: ../openshift-cluster/initialize_groups.yml
+
- include: ../../common/openshift-cluster/std_include.yml
- tags:
- - always
- include: ../../common/openshift-master/restart.yml
- vars:
- openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/byo/openshift-master/scaleup.yml b/playbooks/byo/openshift-master/scaleup.yml
index 7075bb59e..e3ef704e5 100644
--- a/playbooks/byo/openshift-master/scaleup.yml
+++ b/playbooks/byo/openshift-master/scaleup.yml
@@ -1,30 +1,23 @@
---
-- name: Create initial host groups for localhost
+- include: ../openshift-cluster/initialize_groups.yml
+
+- name: Ensure there are new_masters
hosts: localhost
connection: local
become: no
gather_facts: no
- tags:
- - always
tasks:
- - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
- - name: Evaluate group l_oo_all_hosts
- add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts | default([]) }}"
- changed_when: False
+ - fail:
+ msg: >
+ Detected no new_masters or no new_nodes in inventory. Please
+ add hosts to the new_masters and new_nodes host groups to add
+ masters.
+ when:
+ - (g_new_master_hosts | default([]) | length == 0) or (g_new_node_hosts | default([]) | length == 0)
-- name: Create initial host groups for all hosts
- hosts: l_oo_all_hosts
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
+- include: ../../common/openshift-cluster/std_include.yml
- include: ../../common/openshift-master/scaleup.yml
vars:
openshift_cluster_id: "{{ cluster_id | default('default') }}"
openshift_debug_level: "{{ debug_level | default(2) }}"
- openshift_deployment_type: "{{ deployment_type }}"
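The guard added above fails unless the scaleup groups are populated; a sketch of the inventory additions it expects, assuming the usual BYO convention of registering the groups under `[OSEv3:children]` (hostnames are placeholders):

```
[OSEv3:children]
masters
nodes
new_masters
new_nodes

[new_masters]
master2.example.com

[new_nodes]
# new master hosts are typically listed under new_nodes as well
master2.example.com
```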
diff --git a/playbooks/byo/openshift-node/config.yml b/playbooks/byo/openshift-node/config.yml
new file mode 100644
index 000000000..839dc36ff
--- /dev/null
+++ b/playbooks/byo/openshift-node/config.yml
@@ -0,0 +1,6 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+
+- include: ../../common/openshift-cluster/std_include.yml
+
+- include: ../../common/openshift-node/config.yml
diff --git a/playbooks/byo/openshift-node/network_manager.yml b/playbooks/byo/openshift-node/network_manager.yml
index 9bb3ea17f..b23692237 100644
--- a/playbooks/byo/openshift-node/network_manager.yml
+++ b/playbooks/byo/openshift-node/network_manager.yml
@@ -1,42 +1,4 @@
---
-- name: Create initial host groups for localhost
- hosts: localhost
- connection: local
- become: no
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
- - name: Evaluate group l_oo_all_hosts
- add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts | default([]) }}"
- changed_when: False
+- include: ../openshift-cluster/initialize_groups.yml
-- name: Install and configure NetworkManager
- hosts: l_oo_all_hosts
- become: yes
- tasks:
- - name: install NetworkManager
- package:
- name: 'NetworkManager'
- state: present
-
- - name: configure NetworkManager
- lineinfile:
- dest: "/etc/sysconfig/network-scripts/ifcfg-{{ ansible_default_ipv4['interface'] }}"
- regexp: '^{{ item }}='
- line: '{{ item }}=yes'
- state: present
- create: yes
- with_items:
- - 'USE_PEERDNS'
- - 'NM_CONTROLLED'
-
- - name: enable and start NetworkManager
- service:
- name: 'NetworkManager'
- state: started
- enabled: yes
+- include: ../../common/openshift-node/network_manager.yml
diff --git a/playbooks/byo/openshift-node/restart.yml b/playbooks/byo/openshift-node/restart.yml
index 3985a83bb..ccf9e82da 100644
--- a/playbooks/byo/openshift-node/restart.yml
+++ b/playbooks/byo/openshift-node/restart.yml
@@ -1,8 +1,6 @@
---
+- include: ../openshift-cluster/initialize_groups.yml
+
- include: ../../common/openshift-cluster/std_include.yml
- tags:
- - always
- include: ../../common/openshift-node/restart.yml
- vars:
- openshift_deployment_type: "{{ deployment_type }}"
diff --git a/playbooks/byo/openshift-node/scaleup.yml b/playbooks/byo/openshift-node/scaleup.yml
index 2b10b6c76..e0c36fb69 100644
--- a/playbooks/byo/openshift-node/scaleup.yml
+++ b/playbooks/byo/openshift-node/scaleup.yml
@@ -1,32 +1,19 @@
---
-- name: Create initial host groups for localhost
+- include: ../openshift-cluster/initialize_groups.yml
+
+- name: Ensure there are new_nodes
hosts: localhost
connection: local
become: no
gather_facts: no
- tags:
- - always
tasks:
- - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
- - name: Evaluate group l_oo_all_hosts
- add_host:
- name: "{{ item }}"
- groups: l_oo_all_hosts
- with_items: "{{ g_all_hosts | default([]) }}"
- changed_when: False
+ - fail:
+ msg: >
+ Detected no new_nodes in inventory. Please add hosts to the
+ new_nodes host group to add nodes.
+ when:
+ - g_new_node_hosts | default([]) | length == 0
-- name: Create initial host groups for all hosts
- hosts: l_oo_all_hosts
- gather_facts: no
- tags:
- - always
- tasks:
- - include_vars: ../../byo/openshift-cluster/cluster_hosts.yml
+- include: ../../common/openshift-cluster/std_include.yml
-- include: ../../common/openshift-node/scaleup.yml
- vars:
- openshift_cluster_id: "{{ cluster_id | default('default') }}"
- openshift_debug_level: "{{ debug_level | default(2) }}"
- openshift_deployment_type: "{{ deployment_type }}"
- openshift_master_etcd_hosts: "{{ groups.etcd | default([]) }}"
- openshift_master_etcd_port: 2379
+- include: ../../common/openshift-node/config.yml
diff --git a/playbooks/byo/openshift-preflight/README.md b/playbooks/byo/openshift-preflight/README.md
deleted file mode 100644
index b50292eac..000000000
--- a/playbooks/byo/openshift-preflight/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-# OpenShift preflight checks
-
-Here we provide an Ansible playbook for detecting potential roadblocks prior to
-an install or upgrade.
-
-Ansible's default operation mode is to fail fast, on the first error. However,
-when performing checks, it is useful to gather as much information about
-problems as possible in a single run.
-
-The `check.yml` playbook runs a battery of checks against the inventory hosts
-and tells Ansible to ignore intermediate errors, thus giving a more complete
-diagnostic of the state of each host. Still, if any check failed, the playbook
-run will be marked as having failed.
-
-To facilitate understanding the problems that were encountered, we provide a
-custom callback plugin to summarize execution errors at the end of a playbook
-run.
-
----
-
-*Note that currently the `check.yml` playbook is only useful for RPM-based
-installations. Containerized installs are excluded from checks for now, but
-might be included in the future if there is demand for that.*
-
----
-
-## Running
-
-With an installation of Ansible 2.2 or greater, run the playbook directly
-against your inventory file. Here is the step-by-step:
-
-1. If you haven't done it yet, clone this repository:
-
- ```console
- $ git clone https://github.com/openshift/openshift-ansible
- $ cd openshift-ansible
- ```
-
-2. Run the playbook:
-
- ```console
- $ ansible-playbook -i <inventory file> playbooks/byo/openshift-preflight/check.yml
- ```
diff --git a/playbooks/byo/openshift-preflight/check.yml b/playbooks/byo/openshift-preflight/check.yml
index c5f05d0f0..2e53452a6 100644
--- a/playbooks/byo/openshift-preflight/check.yml
+++ b/playbooks/byo/openshift-preflight/check.yml
@@ -1,12 +1,3 @@
---
-- hosts: OSEv3
- name: run OpenShift health checks
- roles:
- - openshift_health_checker
- post_tasks:
- # NOTE: we need to use the old "action: name" syntax until
- # https://github.com/ansible/ansible/issues/20513 is fixed.
- - action: openshift_health_check
- args:
- checks:
- - '@preflight'
+# location is moved; this file remains so existing instructions keep working
+- include: ../openshift-checks/pre-install.yml
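Assuming an existing inventory, either the old or the new entry point now runs the same pre-install checks, for example:

```
ansible-playbook -i <inventory file> playbooks/byo/openshift-preflight/check.yml
ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/pre-install.yml
```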
diff --git a/playbooks/byo/openshift_facts.yml b/playbooks/byo/openshift_facts.yml
index a21b6a0a5..a8c1c3a88 100644
--- a/playbooks/byo/openshift_facts.yml
+++ b/playbooks/byo/openshift_facts.yml
@@ -1,9 +1,14 @@
---
+- include: openshift-cluster/initialize_groups.yml
+ tags:
+ - always
+
- include: ../common/openshift-cluster/std_include.yml
tags:
- always
- name: Gather Cluster facts
+ # Temporarily reverting to OSEv3 until group standardization is complete
hosts: OSEv3
roles:
- openshift_facts
diff --git a/playbooks/byo/rhel_subscribe.yml b/playbooks/byo/rhel_subscribe.yml
index 65c0b1c01..1b14ff32e 100644
--- a/playbooks/byo/rhel_subscribe.yml
+++ b/playbooks/byo/rhel_subscribe.yml
@@ -1,12 +1,11 @@
---
-- include: ../common/openshift-cluster/std_include.yml
+- include: openshift-cluster/initialize_groups.yml
tags:
- always
- name: Subscribe hosts, update repos and update OS packages
- hosts: l_oo_all_hosts
- vars:
- openshift_deployment_type: "{{ deployment_type }}"
+ # Temporarily reverting to OSEv3 until group standardization is complete
+ hosts: OSEv3
roles:
- role: rhel_subscribe
when: deployment_type in ['atomic-enterprise', 'enterprise', 'openshift-enterprise'] and
diff --git a/playbooks/byo/vagrant.yml b/playbooks/byo/vagrant.yml
deleted file mode 100644
index 76246e7b0..000000000
--- a/playbooks/byo/vagrant.yml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: rhel_subscribe.yml
-
-- include: config.yml