-rw-r--r--  .tito/packages/openshift-ansible | 2
-rw-r--r--  DEPLOYMENT_TYPES.md | 2
-rw-r--r--  README.md | 4
-rw-r--r--  README_AEP.md | 2
-rw-r--r--  filter_plugins/oo_filters.py | 2
-rw-r--r--  inventory/byo/hosts.aep.example | 12
-rw-r--r--  inventory/byo/hosts.origin.example | 13
-rw-r--r--  inventory/byo/hosts.ose.example | 13
-rw-r--r--  lookup_plugins/oo_option.py | 27
-rw-r--r--  openshift-ansible.spec | 47
-rw-r--r--  playbooks/aws/openshift-cluster/config.yml | 5
-rw-r--r--  playbooks/byo/openshift-cluster/config.yml | 4
-rw-r--r--  playbooks/byo/openshift-cluster/upgrades/v3_2/upgrade.yml | 2
-rw-r--r--  playbooks/byo/openshift_facts.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/additional_config.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/evaluate_groups.yml | 2
l---------  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/nuke_images.sh | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/verify_ansible_version.yml | 10
-rw-r--r--  playbooks/common/openshift-master/config.yml | 106
-rw-r--r--  playbooks/common/openshift-node/config.yml | 70
-rw-r--r--  playbooks/gce/openshift-cluster/config.yml | 2
-rw-r--r--  playbooks/libvirt/openshift-cluster/config.yml | 5
-rw-r--r--  playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml | 26
-rw-r--r--  playbooks/libvirt/openshift-cluster/templates/domain.xml | 3
-rw-r--r--  playbooks/libvirt/openshift-cluster/terminate.yml | 12
-rw-r--r--  playbooks/libvirt/openshift-cluster/update.yml | 4
-rw-r--r--  playbooks/openstack/openshift-cluster/config.yml | 4
-rw-r--r--  roles/etcd/tasks/main.yml | 2
-rw-r--r--  roles/openshift_ca/README.md | 48
-rw-r--r--  roles/openshift_ca/meta/main.yml (renamed from roles/openshift_master_ca/meta/main.yml) | 8
-rw-r--r--  roles/openshift_ca/vars/main.yml | 6
-rw-r--r--  roles/openshift_cli/library/openshift_container_binary_sync.py | 131
-rw-r--r--  roles/openshift_cli/tasks/main.yml | 22
-rw-r--r--  roles/openshift_cli/templates/openshift.j2 | 28
-rw-r--r--  roles/openshift_cluster_metrics/README.md | 36
-rw-r--r--  roles/openshift_cluster_metrics/files/cluster-metrics/grafana.yaml | 53
-rw-r--r--  roles/openshift_cluster_metrics/files/cluster-metrics/heapster-serviceaccount.yaml | 4
-rw-r--r--  roles/openshift_cluster_metrics/files/cluster-metrics/heapster.yaml | 30
-rw-r--r--  roles/openshift_cluster_metrics/files/cluster-metrics/influxdb.yaml | 67
-rw-r--r--  roles/openshift_cluster_metrics/tasks/main.yml | 49
-rw-r--r--  roles/openshift_examples/README.md | 2
-rwxr-xr-x  roles/openshift_examples/examples-sync.sh | 1
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/image-streams/image-streams-centos7.json | 17
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/quickstart-templates/dancer-mysql.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/quickstart-templates/django-postgresql.json | 4
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/quickstart-templates/django.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkinstemplate.json | 255
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/quickstart-templates/nodejs-mongodb.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/quickstart-templates/nodejs.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v1.3/quickstart-templates/rails-postgresql.json | 4
-rw-r--r--  roles/openshift_examples/tasks/main.yml | 43
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 18
-rw-r--r--  roles/openshift_facts/tasks/main.yml | 5
-rw-r--r--  roles/openshift_hosted/tasks/registry/registry.yml | 2
-rw-r--r--  roles/openshift_hosted/tasks/registry/storage/object_storage.yml | 6
l---------  roles/openshift_hosted/tasks/registry/storage/registry_config.j2 | 1
l---------  roles/openshift_hosted/tasks/registry/storage/registry_config_secret.j2 | 1
-rw-r--r--  roles/openshift_hosted/tasks/router/router.yml | 17
-rw-r--r--  roles/openshift_master/meta/main.yml | 5
-rw-r--r--  roles/openshift_master/tasks/main.yml | 3
-rw-r--r--  roles/openshift_master_ca/README.md | 34
-rw-r--r--  roles/openshift_master_ca/tasks/main.yml | 24
-rw-r--r--  roles/openshift_master_ca/vars/main.yml | 5
-rw-r--r--  roles/openshift_master_certificates/README.md | 29
-rw-r--r--  roles/openshift_master_certificates/meta/main.yml | 6
-rw-r--r--  roles/openshift_master_certificates/tasks/main.yml | 125
-rw-r--r--  roles/openshift_master_certificates/vars/main.yml | 2
-rw-r--r--  roles/openshift_node/meta/main.yml | 3
-rw-r--r--  roles/openshift_node/tasks/main.yml | 8
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/glusterfs.yml | 30
-rw-r--r--  roles/openshift_node/tasks/storage_plugins/nfs.yml | 16
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2 | 1
-rw-r--r--  roles/openshift_node/templates/partials/kubeletArguments.j2 | 5
-rw-r--r--  roles/openshift_node_certificates/README.md | 33
-rw-r--r--  roles/openshift_node_certificates/meta/main.yml | 6
-rw-r--r--  roles/openshift_node_certificates/tasks/main.yml | 119
-rw-r--r--  roles/openshift_node_certificates/vars/main.yml | 14
-rw-r--r--  roles/openshift_version/tasks/main.yml | 9
-rw-r--r--  utils/src/ooinstall/cli_installer.py | 146
-rw-r--r--  utils/src/ooinstall/oo_config.py | 199
-rw-r--r--  utils/src/ooinstall/openshift_ansible.py | 87
-rw-r--r--  utils/test/cli_installer_tests.py | 453
-rw-r--r--  utils/test/fixture.py | 55
-rw-r--r--  utils/test/oo_config_tests.py | 195
86 files changed, 1692 insertions, 1194 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 5976f188e..25b2d679c 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.3.2-1 ./
+3.3.3-1 ./
diff --git a/DEPLOYMENT_TYPES.md b/DEPLOYMENT_TYPES.md
index 1f64e223a..668d14fc0 100644
--- a/DEPLOYMENT_TYPES.md
+++ b/DEPLOYMENT_TYPES.md
@@ -15,7 +15,7 @@ The table below outlines the defaults per `deployment_type`.
| **Image Streams** | centos | rhel + xpaas | N/A | rhel |
-**NOTE** `enterprise` deloyment type is used for OpenShift Enterprise version
+**NOTE** `enterprise` deployment type is used for OpenShift Enterprise version
3.0.x. OpenShift Enterprise deployments utilizing version 3.1 and later will
make use of the new `openshift-enterprise` deployment type. Additional work to
migrate between the two will be forthcoming.
diff --git a/README.md b/README.md
index 6d38c9945..4fa0286a8 100644
--- a/README.md
+++ b/README.md
@@ -11,11 +11,11 @@ they may in the future.
- Install base dependencies:
- Fedora:
```
- dnf install -y ansible-1.9.4 pyOpenSSL python-cryptography
+ dnf install -y ansible-2.1.0.0 pyOpenSSL python-cryptography
```
- OSX:
```
- # Install ansible 1.9.4 and python 2
+ # Install ansible 2.1.0.0 and python 2
brew install ansible python
```
- Setup for a specific cloud:
diff --git a/README_AEP.md b/README_AEP.md
index 1b926f2ab..c588ebbd3 100644
--- a/README_AEP.md
+++ b/README_AEP.md
@@ -10,7 +10,7 @@
* [Overriding detected ip addresses and hostnames](#overriding-detected-ip-addresses-and-hostnames)
## Requirements
-* ansible 1.9.4
+* ansible 2.1.0.0
* Available in Fedora channels
* Available for EL with EPEL and Optional channel
* One or more RHEL 7.1 VMs
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index d706d0304..ec00a1646 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -660,7 +660,7 @@ class FilterModule(object):
if kind == 'nfs':
host = params['host']
if host == None:
- if len(groups['oo_nfs_to_config']) > 0:
+ if 'oo_nfs_to_config' in groups and len(groups['oo_nfs_to_config']) > 0:
host = groups['oo_nfs_to_config'][0]
else:
raise errors.AnsibleFilterError("|failed no storage host detected")
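
This guard matters when the inventory defines no NFS group at all: without it, `groups['oo_nfs_to_config']` raises an undefined-key error before the length check can run. The same pattern in task form (names illustrative):

```
- name: Pick an NFS host only when the group exists and is non-empty
  set_fact:
    nfs_host: "{{ groups['oo_nfs_to_config'][0] }}"
  when: "'oo_nfs_to_config' in groups and groups['oo_nfs_to_config'] | length > 0"
```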
diff --git a/inventory/byo/hosts.aep.example b/inventory/byo/hosts.aep.example
index e038b39d5..8d2d95f8f 100644
--- a/inventory/byo/hosts.aep.example
+++ b/inventory/byo/hosts.aep.example
@@ -44,9 +44,6 @@ openshift_release=v3.2
# Install the openshift examples
#openshift_install_examples=true
-# Enable cluster metrics
-#use_cluster_metrics=true
-
# Configure logoutURL in the master config for console customization
# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#changing-the-logout-url
#openshift_master_logout_url=http://example.com
@@ -326,11 +323,12 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Disable the OpenShift SDN plugin
# openshift_use_openshift_sdn=False
-# Configure SDN cluster network CIDR block. This network block should
-# be a private block and should not conflict with existing network
-# blocks in your infrastructure that pods may require access to.
-# Can not be changed after deployment.
+# Configure SDN cluster network and kubernetes service CIDR blocks. These
+# network blocks should be private and should not conflict with network blocks
+# in your infrastructure that pods may require access to. Can not be changed
+# after deployment.
#osm_cluster_network_cidr=10.1.0.0/16
+#openshift_portal_net=172.30.0.0/16
# Configure number of bits to allocate to each host’s subnet e.g. 8
# would mean a /24 network on the host.
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index 5bfb451c5..d71ed5727 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -45,9 +45,6 @@ openshift_release=v1.2
# Install the openshift examples
#openshift_install_examples=true
-# Enable cluster metrics
-#use_cluster_metrics=true
-
# Configure logoutURL in the master config for console customization
# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#changing-the-logout-url
#openshift_master_logout_url=http://example.com
@@ -333,11 +330,13 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Disable the OpenShift SDN plugin
# openshift_use_openshift_sdn=False
-# Configure SDN cluster network CIDR block. This network block should
-# be a private block and should not conflict with existing network
-# blocks in your infrastructure that pods may require access to.
-# Can not be changed after deployment.
+# Configure SDN cluster network and kubernetes service CIDR blocks. These
+# network blocks should be private and should not conflict with network blocks
+# in your infrastructure that pods may require access to. Can not be changed
+# after deployment.
#osm_cluster_network_cidr=10.1.0.0/16
+#openshift_portal_net=172.30.0.0/16
+
# Configure number of bits to allocate to each host’s subnet e.g. 8
# would mean a /24 network on the host.
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
index 96a9db30d..ccff97b47 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.ose.example
@@ -44,9 +44,6 @@ openshift_release=v3.2
# Install the openshift examples
#openshift_install_examples=true
-# Enable cluster metrics
-#use_cluster_metrics=true
-
# Configure logoutURL in the master config for console customization
# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#changing-the-logout-url
#openshift_master_logout_url=http://example.com
@@ -327,11 +324,13 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Disable the OpenShift SDN plugin
# openshift_use_openshift_sdn=False
-# Configure SDN cluster network CIDR block. This network block should
-# be a private block and should not conflict with existing network
-# blocks in your infrastructure that pods may require access to.
-# Can not be changed after deployment.
+# Configure SDN cluster network and kubernetes service CIDR blocks. These
+# network blocks should be private and should not conflict with network blocks
+# in your infrastructure that pods may require access to. Can not be changed
+# after deployment.
#osm_cluster_network_cidr=10.1.0.0/16
+#openshift_portal_net=172.30.0.0/16
+
# Configure number of bits to allocate to each host’s subnet e.g. 8
# would mean a /24 network on the host.
diff --git a/lookup_plugins/oo_option.py b/lookup_plugins/oo_option.py
index 3fc46ab9b..bca545771 100644
--- a/lookup_plugins/oo_option.py
+++ b/lookup_plugins/oo_option.py
@@ -33,15 +33,6 @@ except ImportError:
def get_basedir(self, variables):
return self.basedir
-# pylint: disable=no-name-in-module,import-error
-try:
- # ansible-2.0
- from ansible import template
-except ImportError:
- # ansible 1.9.x
- from ansible.utils import template
-
-
# Reason: disable too-few-public-methods because the `run` method is the only
# one required by the Ansible API
# Status: permanently disabled
@@ -65,28 +56,16 @@ class LookupModule(LookupBase):
# which is not used
# Status: permanently disabled unless Ansible API evolves
# pylint: disable=unused-argument
- def run(self, terms, inject=None, **kwargs):
+ def run(self, terms, variables, **kwargs):
''' Main execution path '''
- try:
- terms = template.template(self.basedir, terms, inject)
- # Reason: disable broad-except to really ignore any potential exception
- # This is inspired by the upstream "env" lookup plugin:
- # https://github.com/ansible/ansible/blob/devel/v1/ansible/runner/lookup_plugins/env.py#L29
- # pylint: disable=broad-except
- except Exception:
- pass
-
- if isinstance(terms, basestring):
- terms = [terms]
-
ret = []
for term in terms:
option_name = term.split()[0]
cli_key = 'cli_' + option_name
- if inject and cli_key in inject:
- ret.append(inject[cli_key])
+ if 'vars' in variables and cli_key in variables['vars']:
+ ret.append(variables['vars'][cli_key])
elif option_name in os.environ:
ret.append(os.environ[option_name])
else:
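
The rewrite replaces Ansible 1.9's `inject` dict (and its manual pre-templating, which 2.x performs automatically) with the 2.x `variables` argument; lookup behavior is unchanged: a `cli_`-prefixed variable wins, then an environment variable named after the option. A usage sketch with an illustrative option name:

```
# resolves to the value of cli_image_name if defined,
# else the image_name environment variable
- debug:
    msg: "{{ lookup('oo_option', 'image_name') }}"
```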
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 822070e43..22435f13d 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -5,7 +5,7 @@
}
Name: openshift-ansible
-Version: 3.3.2
+Version: 3.3.3
Release: 1%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
@@ -13,7 +13,7 @@ URL: https://github.com/openshift/openshift-ansible
Source0: https://github.com/openshift/openshift-ansible/archive/%{commit}/%{name}-%{version}.tar.gz
BuildArch: noarch
-Requires: ansible >= 1.9.4
+Requires: ansible >= 2.1.0.0
Requires: python2
Requires: openshift-ansible-docs = %{version}-%{release}
@@ -221,6 +221,49 @@ Atomic OpenShift Utilities includes
%changelog
+* Wed Jul 27 2016 Troy Dawson <tdawson@redhat.com> 3.3.3-1
+- Template named certificates with_items. (abutcher@redhat.com)
+- Replace master_cert_config_dir with common config_base fact.
+ (abutcher@redhat.com)
+- remove outdated openshift_cluster_metrics role (jdetiber@redhat.com)
+- Fix "deloyment" typo in deployment types doc (lxia@redhat.com)
+- Add missing nuke_images.sh symlink. (dgoodwin@redhat.com)
+- a-o-i: Persist Roles Variables (smunilla@redhat.com)
+- Default nodes matching selectors when not collected. (abutcher@redhat.com)
+- Copy openshift binaries instead of using wrapper script.
+ (dgoodwin@redhat.com)
+- Correct relative include for ansible version check. (abutcher@redhat.com)
+- Fix libvirt provider for Ansible 2.1.0.0 (lhuard@amadeus.com)
+- Re-arrange master and node role dependencies. (abutcher@redhat.com)
+- Refactor openshift certificates roles. (abutcher@redhat.com)
+- Check ansible version prior to evaluating cluster hosts and groups.
+ (abutcher@redhat.com)
+- Stop reporting changes when docker pull is already up to date.
+ (dgoodwin@redhat.com)
+- a-o-i: Write Role variable groups (smunilla@redhat.com)
+- Slight modification to error when using mismatched openshift_release.
+ (dgoodwin@redhat.com)
+- fix "databcase" typo in example roles (lxia@redhat.com)
+- Secure router only when openshift.hosted.router.certificate.contents exists.
+ (abutcher@redhat.com)
+- Add jenkinstemplate (sdodson@redhat.com)
+- Fix bugs with origin 1.2 rpm based upgrades. (dgoodwin@redhat.com)
+- Sync latest image streams and templates (sdodson@redhat.com)
+- Ensure 'oo_nfs_to_config' in groups prior to checking group length when nfs
+ host unset. (abutcher@redhat.com)
+- We have proper ansible support and requirements in place now, de-revert this
+ commit (tbielawa@redhat.com)
+- Skip docker upgrades on Atomic. (dgoodwin@redhat.com)
+- Resolve some deprecation warnings. (abutcher@redhat.com)
+- a-o-i: Looser facts requirements for unattended (smunilla@redhat.com)
+- Temporarily link registry config templates for ansible 1.9.x support.
+ (abutcher@redhat.com)
+- Remove relative lookup for registry config and check for skipped update in
+ registry redeploy conditional. (abutcher@redhat.com)
+- Arbitrary Installer yaml (smunilla@redhat.com)
+- Check for existence of sebooleans prior to setting. (abutcher@redhat.com)
+- Require ansible-2.1 (abutcher@redhat.com)
+
* Sun Jul 17 2016 Scott Dodson <sdodson@redhat.com> 3.3.2-1
- Convert openshift_release and openshift_version to strings for startswith
(sdodson@redhat.com)
diff --git a/playbooks/aws/openshift-cluster/config.yml b/playbooks/aws/openshift-cluster/config.yml
index 71ce9e787..647c72239 100644
--- a/playbooks/aws/openshift-cluster/config.yml
+++ b/playbooks/aws/openshift-cluster/config.yml
@@ -1,3 +1,6 @@
+---
+- include: ../../common/openshift-cluster/verify_ansible_version.yml
+
- hosts: localhost
gather_facts: no
tasks:
@@ -6,7 +9,7 @@
- add_host:
name: "{{ item }}"
groups: l_oo_all_hosts
- with_items: g_all_hosts
+ with_items: "{{ g_all_hosts | default([]) }}"
- hosts: l_oo_all_hosts
gather_facts: no
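
The quoting change, repeated across the cluster playbooks below, tracks Ansible 2.x's deprecation of bare variables in `with_items`; the `default([])` additionally keeps `add_host` from failing when `g_all_hosts` is undefined. In isolation:

```
# Ansible 2.x style: a full Jinja2 expression instead of a bare name,
# with an empty-list fallback for inventories that define no hosts
with_items: "{{ g_all_hosts | default([]) }}"
```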
diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml
index c5479d098..0a931fbe0 100644
--- a/playbooks/byo/openshift-cluster/config.yml
+++ b/playbooks/byo/openshift-cluster/config.yml
@@ -1,4 +1,6 @@
---
+- include: ../../common/openshift-cluster/verify_ansible_version.yml
+
- hosts: localhost
connection: local
become: no
@@ -8,7 +10,7 @@
- add_host:
name: "{{ item }}"
groups: l_oo_all_hosts
- with_items: g_all_hosts
+ with_items: "{{ g_all_hosts | default([]) }}"
- hosts: l_oo_all_hosts
gather_facts: no
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_2/upgrade.yml b/playbooks/byo/openshift-cluster/upgrades/v3_2/upgrade.yml
index 8c89e118e..e28313221 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_2/upgrade.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_2/upgrade.yml
@@ -1,4 +1,6 @@
---
+- include: ../../../../common/openshift-cluster/verify_ansible_version.yml
+
- hosts: localhost
connection: local
become: no
diff --git a/playbooks/byo/openshift_facts.yml b/playbooks/byo/openshift_facts.yml
index db8703db6..d966b58fd 100644
--- a/playbooks/byo/openshift_facts.yml
+++ b/playbooks/byo/openshift_facts.yml
@@ -1,4 +1,6 @@
---
+- include: ../common/openshift-cluster/verify_ansible_version.yml
+
- hosts: localhost
connection: local
become: no
diff --git a/playbooks/common/openshift-cluster/additional_config.yml b/playbooks/common/openshift-cluster/additional_config.yml
index a34322754..e9fb3de96 100644
--- a/playbooks/common/openshift-cluster/additional_config.yml
+++ b/playbooks/common/openshift-cluster/additional_config.yml
@@ -19,8 +19,6 @@
- role: openshift_examples
registry_url: "{{ openshift.master.registry_url }}"
when: openshift.common.install_examples | bool
- - role: openshift_cluster_metrics
- when: openshift.common.use_cluster_metrics | bool
- role: openshift_manageiq
when: openshift.common.use_manageiq | bool
- role: cockpit
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index c5273b08f..3fb42a7fa 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -35,7 +35,7 @@
groups: oo_all_hosts
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
- with_items: g_all_hosts | default([])
+ with_items: "{{ g_all_hosts | default([]) }}"
- name: Evaluate oo_masters
add_host:
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/nuke_images.sh b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/nuke_images.sh
new file mode 120000
index 000000000..49a51bba9
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/nuke_images.sh
@@ -0,0 +1 @@
+../files/nuke_images.sh \ No newline at end of file
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
index 7a8dfdf91..a32123952 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/pre.yml
@@ -87,7 +87,7 @@
# Request openshift_release 3.2 and let the openshift_version role handle converting this
# to a more specific version, respecting openshift_image_tag and openshift_pkg_version if
# defined, and overriding the normal behavior of protecting the installed version
- openshift_release: "3.2"
+ openshift_release: "{{ '1.2' if deployment_type == 'origin' else '3.2' }}"
openshift_protect_installed_version: False
# Docker role (a dependency) should be told not to do anything to installed version
# of docker, we handle this separately during upgrade. (the inventory may have a
@@ -163,6 +163,8 @@
- name: Verify containers are available for upgrade
command: >
docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }}
+ register: pull_result
+ changed_when: "'Downloaded newer image' in pull_result.stdout"
when: openshift.common.is_containerized | bool
- name: Check latest available OpenShift RPM version
@@ -176,8 +178,12 @@
- name: Verify OpenShift 3.2 RPMs are available for upgrade
fail:
msg: "OpenShift {{ avail_openshift_version.stdout }} is available, but 3.2 or greater is required"
- when: not openshift.common.is_containerized | bool and not avail_openshift_version | skipped and avail_openshift_version.stdout | default('0.0', True) | version_compare('3.2', '<')
+ when: deployment_type != 'origin' and not openshift.common.is_containerized | bool and not avail_openshift_version | skipped and avail_openshift_version.stdout | default('0.0', True) | version_compare(openshift_release, '<')
+ - name: Verify Origin 1.2 RPMs are available for upgrade
+ fail:
+ msg: "OpenShift {{ avail_openshift_version.stdout }} is available, but 1.2 or greater is required"
+ when: deployment_type == 'origin' and not openshift.common.is_containerized | bool and not avail_openshift_version | skipped and avail_openshift_version.stdout | default('0.0', True) | version_compare(openshift_release, '<')
# TODO: Are these two grep checks necessary anymore?
# Note: the version number is hardcoded here in hopes of catching potential
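
Splitting the check per deployment type lets the failure message name the right version line (1.2 for origin, 3.2 otherwise), while both branches now compare against `openshift_release` instead of a hardcoded "3.2". The comparison uses the standard `version_compare` filter, which returns a boolean:

```
# illustrative semantics of the filter used above
- debug:
    msg: "{{ '1.1.4' | version_compare('1.2', '<') }}"   # -> True
```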
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
index 0ea315d0a..59cedc839 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_1_to_v3_2/upgrade.yml
@@ -84,10 +84,10 @@
# Only check if docker upgrade is required if docker_upgrade is not
# already set to False.
- include: ../docker/upgrade_check.yml
- when: docker_upgrade is not defined or docker_upgrade | bool
+ when: docker_upgrade is not defined or docker_upgrade | bool and not openshift.common.is_atomic | bool
- include: ../docker/upgrade.yml
- when: l_docker_upgrade is defined and l_docker_upgrade | bool
+ when: l_docker_upgrade is defined and l_docker_upgrade | bool and not openshift.common.is_atomic | bool
- include: rpm_upgrade.yml
vars:
diff --git a/playbooks/common/openshift-cluster/verify_ansible_version.yml b/playbooks/common/openshift-cluster/verify_ansible_version.yml
new file mode 100644
index 000000000..2a143b065
--- /dev/null
+++ b/playbooks/common/openshift-cluster/verify_ansible_version.yml
@@ -0,0 +1,10 @@
+---
+- hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tasks:
+ - name: Verify Ansible version is greater than or equal to 2.1.0.0
+ fail:
+ msg: "Unsupported ansible version: {{ ansible_version.full }} found"
+ when: not ansible_version.full | version_compare('2.1.0.0', 'ge')
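
Because the play targets localhost with fact gathering disabled, an unsupported Ansible version aborts the run before any cluster host is touched. Entry-point playbooks opt in with a single include at the top, as the other playbooks in this commit do:

```
---
# first play of an entry-point playbook; the relative path varies
- include: ../../common/openshift-cluster/verify_ansible_version.yml
```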
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index bb8fb77b6..2f12b08c9 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -156,79 +156,6 @@
- master.etcd-ca.crt
when: etcd_client_certs_missing is defined and etcd_client_certs_missing
-- name: Determine if master certificates need to be generated
- hosts: oo_first_master:oo_masters_to_config
- tasks:
- - set_fact:
- openshift_master_certs_no_etcd:
- - admin.crt
- - master.kubelet-client.crt
- - "{{ 'master.proxy-client.crt' if openshift.common.version_gte_3_1_or_1_1 else omit }}"
- - master.server.crt
- - openshift-master.crt
- - openshift-registry.crt
- - openshift-router.crt
- - etcd.server.crt
- openshift_master_certs_etcd:
- - master.etcd-client.crt
-
- - set_fact:
- openshift_master_certs: "{{ (openshift_master_certs_no_etcd | union(openshift_master_certs_etcd)) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else openshift_master_certs_no_etcd }}"
-
- - name: Check status of master certificates
- stat:
- path: "{{ openshift.common.config_base }}/master/{{ item }}"
- with_items: "{{ openshift_master_certs }}"
- register: g_master_cert_stat_result
- - set_fact:
- master_certs_missing: "{{ False in (g_master_cert_stat_result.results
- | oo_collect(attribute='stat.exists')
- | list ) }}"
- master_cert_subdir: master-{{ openshift.common.hostname }}
- master_cert_config_dir: "{{ openshift.common.config_base }}/master"
-
-- name: Configure master certificates
- hosts: oo_first_master
- vars:
- master_generated_certs_dir: "{{ openshift.common.config_base }}/generated-configs"
- masters_needing_certs: "{{ hostvars
- | oo_select_keys(groups['oo_masters_to_config'] | difference(groups['oo_first_master']))
- | oo_filter_list(filter_attr='master_certs_missing') }}"
- master_hostnames: "{{ hostvars
- | oo_select_keys(groups['oo_masters_to_config'])
- | oo_collect('openshift.common.all_hostnames')
- | oo_flatten | unique }}"
- sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
- roles:
- - openshift_master_certificates
- post_tasks:
- - name: Remove generated etcd client certs when using external etcd
- file:
- path: "{{ master_generated_certs_dir }}/{{ item.0.master_cert_subdir }}/{{ item.1 }}"
- state: absent
- when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
- with_nested:
- - "{{ masters_needing_certs | default([]) }}"
- - - master.etcd-client.crt
- - master.etcd-client.key
-
- - name: Create a tarball of the master certs
- command: >
- tar -czvf {{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz
- -C {{ master_generated_certs_dir }}/{{ item.master_cert_subdir }} .
- args:
- creates: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz"
- with_items: "{{ masters_needing_certs | default([]) }}"
-
- - name: Retrieve the master cert tarball from the master
- fetch:
- src: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz"
- dest: "{{ sync_tmpdir }}/"
- flat: yes
- fail_on_missing: yes
- validate_checksum: yes
- with_items: "{{ masters_needing_certs | default([]) }}"
-
- name: Check for cached session secrets
hosts: oo_first_master
roles:
@@ -243,7 +170,7 @@
- name: Generate master session secrets
hosts: oo_first_master
vars:
- g_session_secrets_present: "{{ (openshift.master.session_auth_secrets | default([]) and openshift.master.session_encryption_secrets | default([])) | length > 0 }}"
+ g_session_secrets_present: "{{ (openshift.master.session_auth_secrets | default([])) | length > 0 and (openshift.master.session_encryption_secrets | default([])) | length > 0 }}"
g_session_auth_secrets: "{{ [ 24 | oo_generate_secret ] }}"
g_session_encryption_secrets: "{{ [ 24 | oo_generate_secret ] }}"
roles:
@@ -263,7 +190,7 @@
vars:
internal_hostnames: "{{ hostvars[groups.oo_first_master.0].openshift.common.internal_hostnames }}"
named_certificates: "{{ hostvars[groups.oo_first_master.0].openshift_master_named_certificates | default([]) }}"
- named_certificates_dir: "{{ hostvars[groups.oo_first_master.0].master_cert_config_dir }}/named_certificates/"
+ named_certificates_dir: "{{ hostvars[groups.oo_first_master.0].openshift.common.config_base }}/master/named_certificates/"
tasks:
- set_fact:
parsed_named_certificates: "{{ named_certificates | oo_parse_named_certificates(named_certificates_dir, internal_hostnames) }}"
@@ -272,7 +199,7 @@
- name: Deploy named certificates
hosts: oo_masters_to_config
vars:
- named_certs_dir: "{{ master_cert_config_dir }}/named_certificates/"
+ named_certs_dir: "{{ openshift.common.config_base }}/master/named_certificates/"
named_certs_specified: "{{ openshift_master_named_certificates is defined }}"
overwrite_named_certs: "{{ openshift_master_overwrite_named_certificates | default(false) }}"
roles:
@@ -297,17 +224,16 @@
when: named_certs_specified | bool
- name: Land named certificates
copy: src="{{ item.certfile }}" dest="{{ named_certs_dir }}"
- with_items: openshift_master_named_certificates
+ with_items: "{{ openshift_master_named_certificates }}"
when: named_certs_specified | bool
- name: Land named certificate keys
copy: src="{{ item.keyfile }}" dest="{{ named_certs_dir }}" mode=0600
- with_items: openshift_master_named_certificates
+ with_items: "{{ openshift_master_named_certificates }}"
when: named_certs_specified | bool
- name: Configure masters
hosts: oo_masters_to_config
any_errors_fatal: true
- serial: 1
vars:
sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
openshift_master_ha: "{{ openshift.master.ha }}"
@@ -321,19 +247,17 @@
}}"
when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
openshift_generate_no_proxy_hosts | default(True) | bool }}"
- pre_tasks:
- - name: Ensure certificate directory exists
- file:
- path: "{{ openshift.common.config_base }}/master"
- state: directory
- when: master_certs_missing | bool and 'oo_first_master' not in group_names
- - name: Unarchive the tarball on the master
- unarchive:
- src: "{{ sync_tmpdir }}/{{ master_cert_subdir }}.tgz"
- dest: "{{ master_cert_config_dir }}"
- when: master_certs_missing | bool and 'oo_first_master' not in group_names
roles:
- - openshift_master
+ - role: openshift_master
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+ openshift_master_etcd_hosts: "{{ hostvars
+ | oo_select_keys(groups['oo_etcd_to_config'] | default([]))
+ | oo_collect('openshift.common.hostname')
+ | default(none, true) }}"
+ openshift_master_hostnames: "{{ hostvars
+ | oo_select_keys(groups['oo_masters_to_config'] | default([]))
+ | oo_collect('openshift.common.all_hostnames')
+ | oo_flatten | unique }}"
- role: nickhammond.logrotate
- role: nuage_master
when: openshift.common.use_nuage | bool
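
Together with the plays deleted above, this removes the generate/tar/fetch/unarchive round trip for master certificates: the reworked `openshift_master_certificates` role (via the new `openshift_ca` dependency documented below) generates certificates by delegating to `openshift_ca_host` and distributes them itself, and the play now configures all masters at once instead of `serial: 1`.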
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index 80659dc52..a8c49d37b 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -19,23 +19,6 @@
labels: "{{ openshift_node_labels | default(None) }}"
annotations: "{{ openshift_node_annotations | default(None) }}"
schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
- - name: Check status of node certificates
- stat:
- path: "{{ openshift.common.config_base }}/node/{{ item }}"
- with_items:
- - "system:node:{{ openshift.common.hostname }}.crt"
- - "system:node:{{ openshift.common.hostname }}.key"
- - "system:node:{{ openshift.common.hostname }}.kubeconfig"
- - ca.crt
- - server.key
- - server.crt
- register: stat_result
- - set_fact:
- certs_missing: "{{ stat_result.results | oo_collect(attribute='stat.exists')
- | list | intersect([false])}}"
- node_subdir: node-{{ openshift.common.hostname }}
- config_dir: "{{ openshift.common.config_base }}/generated-configs/node-{{ openshift.common.hostname }}"
- node_cert_dir: "{{ openshift.common.config_base }}/node"
- name: Create temp directory for syncing certs
hosts: localhost
@@ -48,53 +31,6 @@
register: mktemp
changed_when: False
-- name: Create node certificates
- hosts: oo_first_master
- vars:
- nodes_needing_certs: "{{ hostvars
- | oo_select_keys(groups['oo_nodes_to_config']
- | default([]))
- | oo_filter_list(filter_attr='certs_missing') }}"
- sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
- roles:
- - openshift_node_certificates
- post_tasks:
- - name: Create a tarball of the node config directories
- command: >
- tar -czvf {{ item.config_dir }}.tgz
- --transform 's|system:{{ item.node_subdir }}|node|'
- -C {{ item.config_dir }} .
- args:
- creates: "{{ item.config_dir }}.tgz"
- with_items: "{{ nodes_needing_certs | default([]) }}"
-
- - name: Retrieve the node config tarballs from the master
- fetch:
- src: "{{ item.config_dir }}.tgz"
- dest: "{{ sync_tmpdir }}/"
- flat: yes
- fail_on_missing: yes
- validate_checksum: yes
- with_items: "{{ nodes_needing_certs | default([]) }}"
-
-- name: Deploy node certificates
- hosts: oo_nodes_to_config
- vars:
- sync_tmpdir: "{{ hostvars.localhost.mktemp.stdout }}"
- tasks:
- - name: Ensure certificate directory exists
- file:
- path: "{{ node_cert_dir }}"
- state: directory
- # TODO: notify restart node
- # possibly test service started time against certificate/config file
- # timestamps in node to trigger notify
- - name: Unarchive the tarball on the node
- unarchive:
- src: "{{ sync_tmpdir }}/{{ node_subdir }}.tgz"
- dest: "{{ node_cert_dir }}"
- when: certs_missing
-
- name: Evaluate node groups
hosts: localhost
become: no
@@ -124,7 +60,8 @@
when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
openshift_generate_no_proxy_hosts | default(True) | bool }}"
roles:
- - openshift_node
+ - role: openshift_node
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- name: Configure node instances
hosts: oo_nodes_to_config:!oo_containerized_master_nodes
@@ -140,7 +77,8 @@
when: "{{ (openshift_http_proxy is defined or openshift_https_proxy is defined) and
openshift_generate_no_proxy_hosts | default(True) | bool }}"
roles:
- - openshift_node
+ - role: openshift_node
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- name: Gather and set facts for flannel certificatess
hosts: oo_nodes_to_config
diff --git a/playbooks/gce/openshift-cluster/config.yml b/playbooks/gce/openshift-cluster/config.yml
index 97572b930..8e46c5919 100644
--- a/playbooks/gce/openshift-cluster/config.yml
+++ b/playbooks/gce/openshift-cluster/config.yml
@@ -9,7 +9,7 @@
groups: l_oo_all_hosts
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: g_all_hosts
+ with_items: "{{ g_all_hosts | default([]) }}"
- hosts: l_oo_all_hosts
gather_facts: no
diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml
index 21d82f422..299325fc4 100644
--- a/playbooks/libvirt/openshift-cluster/config.yml
+++ b/playbooks/libvirt/openshift-cluster/config.yml
@@ -2,6 +2,9 @@
# TODO: need to figure out a plan for setting hostname, currently the default
# is localhost, so no hostname value (or public_hostname) value is getting
# assigned
+
+- include: ../../common/openshift-cluster/verify_ansible_version.yml
+
- hosts: localhost
gather_facts: no
tasks:
@@ -10,7 +13,7 @@
- add_host:
name: "{{ item }}"
groups: l_oo_all_hosts
- with_items: g_all_hosts
+ with_items: "{{ g_all_hosts | default([]) }}"
- hosts: l_oo_all_hosts
gather_facts: no
diff --git a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
index 833586ffa..cc34d0ef9 100644
--- a/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/libvirt/openshift-cluster/tasks/launch_instances.yml
@@ -39,14 +39,14 @@
file:
dest: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/'
state: directory
- with_items: instances
+ with_items: '{{ instances }}'
- name: Create the cloud-init config drive files
template:
src: '{{ item[1] }}'
dest: '{{ libvirt_storage_pool_path }}/{{ item[0] }}_configdrive/{{ item[1] }}'
with_nested:
- - instances
+ - '{{ instances }}'
- [ user-data, meta-data ]
- name: Create the cloud-init config drive
@@ -54,18 +54,18 @@
args:
chdir: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/'
creates: '{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'
- with_items: instances
+ with_items: '{{ instances }}'
- name: Refresh the libvirt storage pool for openshift
command: 'virsh -c {{ libvirt_uri }} pool-refresh {{ libvirt_storage_pool }}'
- name: Create VM drives
command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ libvirt_storage_pool }} {{ item }}.qcow2 10G --format qcow2 --backing-vol {{ image_name }} --backing-vol-format qcow2'
- with_items: instances
+ with_items: '{{ instances }}'
- name: Create VM docker drives
command: 'virsh -c {{ libvirt_uri }} vol-create-as {{ libvirt_storage_pool }} {{ item }}-docker.qcow2 10G --format qcow2 --allocation 0'
- with_items: instances
+ with_items: '{{ instances }}'
- name: Create VMs
virt:
@@ -73,14 +73,14 @@
command: define
xml: "{{ lookup('template', '../templates/domain.xml') }}"
uri: '{{ libvirt_uri }}'
- with_items: instances
+ with_items: '{{ instances }}'
- name: Start VMs
virt:
name: '{{ item }}'
state: running
uri: '{{ libvirt_uri }}'
- with_items: instances
+ with_items: '{{ instances }}'
- name: Wait for the VMs to get an IP
shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases {{ libvirt_network }} | egrep -c ''{{ instances | join("|") }}'''
@@ -93,7 +93,7 @@
- name: Collect IP addresses of the VMs
shell: 'virsh -c {{ libvirt_uri }} net-dhcp-leases {{ libvirt_network }} | awk ''$6 == "{{ item }}" {gsub(/\/.*/, "", $5); print $5}'''
register: scratch_ip
- with_items: instances
+ with_items: '{{ instances }}'
- set_fact:
ips: "{{ scratch_ip.results | default([]) | oo_collect('stdout') }}"
@@ -117,14 +117,14 @@
groups: "tag_environment-{{ cluster_env }}, tag_host-type-{{ type }}, tag_sub-host-type-{{ g_sub_host_type }}, tag_clusterid-{{ cluster_id }}"
openshift_node_labels: "{{ node_label }}"
with_together:
- - instances
- - ips
+ - '{{ instances }}'
+ - '{{ ips }}'
- name: Wait for ssh
wait_for:
host: '{{ item }}'
port: 22
- with_items: ips
+ with_items: '{{ ips }}'
- name: Wait for openshift user setup
command: 'ssh -o StrictHostKeyChecking=no -o PasswordAuthentication=no -o ConnectTimeout=10 -o UserKnownHostsFile=/dev/null openshift@{{ item.1 }} echo openshift user is setup'
@@ -133,5 +133,5 @@
retries: 30
delay: 1
with_together:
- - instances
- - ips
+ - '{{ instances }}'
+ - '{{ ips }}' \ No newline at end of file
diff --git a/playbooks/libvirt/openshift-cluster/templates/domain.xml b/playbooks/libvirt/openshift-cluster/templates/domain.xml
index b645a791a..88504a5f6 100644
--- a/playbooks/libvirt/openshift-cluster/templates/domain.xml
+++ b/playbooks/libvirt/openshift-cluster/templates/domain.xml
@@ -19,6 +19,9 @@
<apic/>
<pae/>
</features>
+ <cpu mode='host-model'>
+ <model fallback='allow'/>
+ </cpu>
<clock offset='utc'>
<timer name='rtc' tickpolicy='catchup'/>
<timer name='pit' tickpolicy='delay'/>
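
`<cpu mode='host-model'>` has libvirt present a close copy of the host's CPU model and flags to the guest rather than the hypervisor's generic default, so software in the cluster VMs sees the CPU features of the machine it actually runs on.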
diff --git a/playbooks/libvirt/openshift-cluster/terminate.yml b/playbooks/libvirt/openshift-cluster/terminate.yml
index baef911f9..df5c52f2d 100644
--- a/playbooks/libvirt/openshift-cluster/terminate.yml
+++ b/playbooks/libvirt/openshift-cluster/terminate.yml
@@ -15,7 +15,7 @@
groups: oo_hosts_to_terminate
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: groups[cluster_group] | default([])
+ with_items: '{{ groups[cluster_group] | default([]) }}'
- name: Unsubscribe VMs
hosts: oo_hosts_to_terminate
@@ -42,30 +42,30 @@
command: '{{ item[1] }}'
uri: '{{ libvirt_uri }}'
with_nested:
- - groups['oo_hosts_to_terminate']
+ - "{{ groups['oo_hosts_to_terminate'] }}"
- [ destroy, undefine ]
- name: Delete VM drives
command: 'virsh -c {{ libvirt_uri }} vol-delete --pool {{ libvirt_storage_pool }} {{ item }}.qcow2'
args:
removes: '{{ libvirt_storage_pool_path }}/{{ item }}.qcow2'
- with_items: groups['oo_hosts_to_terminate']
+ with_items: "{{ groups['oo_hosts_to_terminate'] }}"
- name: Delete VM docker drives
command: 'virsh -c {{ libvirt_uri }} vol-delete --pool {{ libvirt_storage_pool }} {{ item }}-docker.qcow2'
args:
removes: '{{ libvirt_storage_pool_path }}/{{ item }}-docker.qcow2'
- with_items: groups['oo_hosts_to_terminate']
+ with_items: "{{ groups['oo_hosts_to_terminate'] }}"
- name: Delete the VM cloud-init image
file:
path: '{{ libvirt_storage_pool_path }}/{{ item }}_cloud-init.iso'
state: absent
- with_items: groups['oo_hosts_to_terminate']
+ with_items: "{{ groups['oo_hosts_to_terminate'] }}"
- name: Remove the cloud-init config directory
file:
path: '{{ libvirt_storage_pool_path }}/{{ item }}_configdrive/'
state: absent
- with_items: groups['oo_hosts_to_terminate']
+ with_items: "{{ groups['oo_hosts_to_terminate'] }}"
diff --git a/playbooks/libvirt/openshift-cluster/update.yml b/playbooks/libvirt/openshift-cluster/update.yml
index 28362c984..a152135fc 100644
--- a/playbooks/libvirt/openshift-cluster/update.yml
+++ b/playbooks/libvirt/openshift-cluster/update.yml
@@ -7,7 +7,7 @@
- add_host:
name: "{{ item }}"
groups: l_oo_all_hosts
- with_items: g_all_hosts
+ with_items: '{{ g_all_hosts }}'
- hosts: l_oo_all_hosts
gather_facts: no
@@ -30,7 +30,7 @@
groups: oo_hosts_to_update
ansible_ssh_user: "{{ deployment_vars[deployment_type].ssh_user }}"
ansible_become: "{{ deployment_vars[deployment_type].become }}"
- with_items: g_all_hosts | default([])
+ with_items: '{{ g_all_hosts | default([]) }}'
- include: ../../common/openshift-cluster/update_repos_and_packages.yml
diff --git a/playbooks/openstack/openshift-cluster/config.yml b/playbooks/openstack/openshift-cluster/config.yml
index 6fff31826..f6550b2c4 100644
--- a/playbooks/openstack/openshift-cluster/config.yml
+++ b/playbooks/openstack/openshift-cluster/config.yml
@@ -1,4 +1,6 @@
---
+- include: ../../common/openshift-cluster/verify_ansible_version.yml
+
- hosts: localhost
gather_facts: no
tasks:
@@ -7,7 +9,7 @@
- add_host:
name: "{{ item }}"
groups: l_oo_all_hosts
- with_items: g_all_hosts
+ with_items: "{{ g_all_hosts | default([]) }}"
- hosts: l_oo_all_hosts
gather_facts: no
diff --git a/roles/etcd/tasks/main.yml b/roles/etcd/tasks/main.yml
index 71735dc25..75d40216d 100644
--- a/roles/etcd/tasks/main.yml
+++ b/roles/etcd/tasks/main.yml
@@ -12,6 +12,8 @@
- name: Pull etcd container
command: docker pull {{ openshift.etcd.etcd_image }}
+ register: pull_result
+ changed_when: "'Downloaded newer image' in pull_result.stdout"
when: etcd_is_containerized | bool
- name: Install etcd container service file
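
`command` tasks otherwise report "changed" on every run; registering the pull and keying `changed_when` off docker's "Downloaded newer image" output makes an already-current image a clean no-op in the play recap. The same three-line pattern is applied to every `docker pull` this commit touches.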
diff --git a/roles/openshift_ca/README.md b/roles/openshift_ca/README.md
new file mode 100644
index 000000000..96c9cd5f2
--- /dev/null
+++ b/roles/openshift_ca/README.md
@@ -0,0 +1,48 @@
+OpenShift CA
+============
+
+This role delegates all tasks to the `openshift_ca_host` such that this role can be depended on by other OpenShift certificate roles.
+
+Requirements
+------------
+
+Role Variables
+--------------
+
+From this role:
+
+| Name | Default value | Description |
+|-------------------------|-----------------------------------------------|-----------------------------------------------------------------------------|
+| openshift_ca_host | None (Required) | The hostname of the system where the OpenShift CA will be created. |
+| openshift_ca_config_dir | `{{ openshift.common.config_base }}/master` | CA certificate directory. |
+| openshift_ca_cert | `{{ openshift_ca_config_dir }}/ca.crt` | CA certificate path including CA certificate filename. |
+| openshift_ca_key | `{{ openshift_ca_config_dir }}/ca.key` | CA key path including CA key filename. |
+| openshift_ca_serial | `{{ openshift_ca_config_dir }}/ca.serial.txt` | CA serial path including CA serial filename. |
+| openshift_version | `{{ openshift_pkg_version }}` | OpenShift package version. |
+
+Dependencies
+------------
+
+* openshift_repos
+* openshift_cli
+
+Example Playbook
+----------------
+
+```
+- name: Create OpenShift CA
+ hosts: localhost
+ roles:
+ - role: openshift_ca
+ openshift_ca_host: master1.example.com
+```
+
+License
+-------
+
+Apache License Version 2.0
+
+Author Information
+------------------
+
+Jason DeTiberus (jdetiber@redhat.com)
diff --git a/roles/openshift_master_ca/meta/main.yml b/roles/openshift_ca/meta/main.yml
index b5dd466c9..a08aa1686 100644
--- a/roles/openshift_master_ca/meta/main.yml
+++ b/roles/openshift_ca/meta/main.yml
@@ -1,10 +1,10 @@
---
galaxy_info:
author: Jason DeTiberus
- description:
+ description: OpenShift CA
company: Red Hat, Inc.
license: Apache License, Version 2.0
- min_ansible_version: 1.8
+ min_ansible_version: 2.1
platforms:
- name: EL
versions:
@@ -13,5 +13,5 @@ galaxy_info:
- cloud
- system
dependencies:
-- { role: openshift_repos }
-- { role: openshift_cli }
+- role: openshift_repos
+- role: openshift_cli
diff --git a/roles/openshift_ca/vars/main.yml b/roles/openshift_ca/vars/main.yml
new file mode 100644
index 000000000..a32e385ec
--- /dev/null
+++ b/roles/openshift_ca/vars/main.yml
@@ -0,0 +1,6 @@
+---
+openshift_ca_config_dir: "{{ openshift.common.config_base }}/master"
+openshift_ca_cert: "{{ openshift_ca_config_dir }}/ca.crt"
+openshift_ca_key: "{{ openshift_ca_config_dir }}/ca.key"
+openshift_ca_serial: "{{ openshift_ca_config_dir }}/ca.serial.txt"
+openshift_version: "{{ openshift_pkg_version | default('') }}"
diff --git a/roles/openshift_cli/library/openshift_container_binary_sync.py b/roles/openshift_cli/library/openshift_container_binary_sync.py
new file mode 100644
index 000000000..fd290c6fc
--- /dev/null
+++ b/roles/openshift_cli/library/openshift_container_binary_sync.py
@@ -0,0 +1,131 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
+# pylint: disable=missing-docstring,invalid-name
+#
+
+import random
+import tempfile
+import shutil
+import os.path
+
+# pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import
+from ansible.module_utils.basic import *
+
+
+DOCUMENTATION = '''
+---
+module: openshift_container_binary_sync
+short_description: Copies OpenShift binaries out of the given image tag to host system.
+'''
+
+
+class BinarySyncError(Exception):
+ def __init__(self, msg):
+ super(BinarySyncError, self).__init__(msg)
+ self.msg = msg
+
+
+# pylint: disable=too-few-public-methods
+class BinarySyncer(object):
+ """
+ Syncs the openshift, oc, oadm, and kubectl binaries/symlinks out of
+ a container onto the host system.
+ """
+
+ def __init__(self, module, image, tag):
+ self.module = module
+ self.changed = False
+ self.output = []
+ self.bin_dir = '/usr/local/bin'
+ self.image = image
+ self.tag = tag
+ self.temp_dir = None # TBD
+
+ def sync(self):
+ container_name = "openshift-cli-%s" % random.randint(1, 100000)
+ rc, stdout, stderr = self.module.run_command(['docker', 'create', '--name',
+ container_name, '%s:%s' % (self.image, self.tag)])
+ if rc:
+ raise BinarySyncError("Error creating temporary docker container. stdout=%s, stderr=%s" %
+ (stdout, stderr))
+ self.output.append(stdout)
+ try:
+ self.temp_dir = tempfile.mkdtemp()
+ self.output.append("Using temp dir: %s" % self.temp_dir)
+
+ rc, stdout, stderr = self.module.run_command(['docker', 'cp', "%s:/usr/bin/openshift" % container_name,
+ self.temp_dir])
+ if rc:
+ raise BinarySyncError("Error copying file from docker container: stdout=%s, stderr=%s" %
+ (stdout, stderr))
+
+ rc, stdout, stderr = self.module.run_command(['docker', 'cp', "%s:/usr/bin/oc" % container_name,
+ self.temp_dir])
+ if rc:
+ raise BinarySyncError("Error copying file from docker container: stdout=%s, stderr=%s" %
+ (stdout, stderr))
+
+ self._sync_binary('openshift')
+
+ # In older versions, oc was a symlink to openshift:
+ if os.path.islink(os.path.join(self.temp_dir, 'oc')):
+ self._sync_symlink('oc', 'openshift')
+ else:
+ self._sync_binary('oc')
+
+ # Ensure correct symlinks created:
+ self._sync_symlink('kubectl', 'openshift')
+ self._sync_symlink('oadm', 'openshift')
+ finally:
+ shutil.rmtree(self.temp_dir)
+ self.module.run_command(['docker', 'rm', container_name])
+
+ def _sync_symlink(self, binary_name, link_to):
+ """ Ensure the given binary name exists and links to the expected binary. """
+ link_path = os.path.join(self.bin_dir, binary_name)
+        link_dest = os.path.join(self.bin_dir, link_to)  # expected target of the symlink
+ if not os.path.exists(link_path) or \
+ not os.path.islink(link_path) or \
+ os.path.realpath(link_path) != os.path.realpath(link_dest):
+ if os.path.exists(link_path):
+ os.remove(link_path)
+ os.symlink(link_to, os.path.join(self.bin_dir, binary_name))
+ self.output.append("Symlinked %s to %s." % (link_path, link_dest))
+ self.changed = True
+
+ def _sync_binary(self, binary_name):
+ src_path = os.path.join(self.temp_dir, binary_name)
+ dest_path = os.path.join(self.bin_dir, binary_name)
+ incoming_checksum = self.module.run_command(['sha256sum', src_path])[1]
+ if not os.path.exists(dest_path) or self.module.run_command(['sha256sum', dest_path])[1] != incoming_checksum:
+ shutil.move(src_path, dest_path)
+ self.output.append("Moved %s to %s." % (src_path, dest_path))
+ self.changed = True
+
+
+def main():
+ module = AnsibleModule(
+ argument_spec=dict(
+ image=dict(required=True),
+ tag=dict(required=True),
+ ),
+ supports_check_mode=True
+ )
+
+ image = module.params['image']
+ tag = module.params['tag']
+
+ binary_syncer = BinarySyncer(module, image, tag)
+
+ try:
+ binary_syncer.sync()
+ except BinarySyncError as ex:
+ module.fail_json(msg=ex.msg)
+
+ return module.exit_json(changed=binary_syncer.changed,
+ output=binary_syncer.output)
+
+
+if __name__ == '__main__':
+ main()
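
The module is idempotent: binaries are replaced only on a sha256 mismatch and symlinks only when missing or incorrect, with `changed` reported accordingly. It is invoked by the task added to `roles/openshift_cli/tasks/main.yml` below:

```
- openshift_container_binary_sync:
    image: "{{ openshift.common.cli_image }}"
    tag: "{{ openshift_image_tag }}"
```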
diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml
index 4d6219b94..11c73b25c 100644
--- a/roles/openshift_cli/tasks/main.yml
+++ b/roles/openshift_cli/tasks/main.yml
@@ -6,24 +6,14 @@
- name: Pull CLI Image
command: >
docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }}
+ register: pull_result
+ changed_when: "'Downloaded newer image' in pull_result.stdout"
when: openshift.common.is_containerized | bool
-- name: Create /usr/local/bin/openshift cli wrapper
- template:
- src: openshift.j2
- dest: /usr/local/bin/openshift
- mode: 0755
- when: openshift.common.is_containerized | bool
-
-- name: Create client symlinks
- file:
- path: "{{ item }}"
- state: link
- src: /usr/local/bin/openshift
- with_items:
- - /usr/local/bin/oadm
- - /usr/local/bin/oc
- - /usr/local/bin/kubectl
+- name: Copy client binaries/symlinks out of CLI image for use on the host
+ openshift_container_binary_sync:
+ image: "{{ openshift.common.cli_image }}"
+ tag: "{{ openshift_image_tag }}"
when: openshift.common.is_containerized | bool
- name: Reload facts to pick up installed OpenShift version
diff --git a/roles/openshift_cli/templates/openshift.j2 b/roles/openshift_cli/templates/openshift.j2
deleted file mode 100644
index 7786acead..000000000
--- a/roles/openshift_cli/templates/openshift.j2
+++ /dev/null
@@ -1,28 +0,0 @@
-#!/bin/bash
-if [ ! -d ~/.kube ]; then
- mkdir -m 0700 ~/.kube
-fi
-cmd=`basename $0`
-user=`id -u`
-group=`id -g`
-image_tag="{{ openshift_image_tag }}"
-
->&2 echo """
-================================================================================
-ATTENTION: You are running ${cmd} via a wrapper around 'docker run {{ openshift.common.cli_image }}:${image_tag}'.
-This wrapper is intended only to be used to bootstrap an environment. Please
-install client tools on another host once you have granted cluster-admin
-privileges to a user.
-{% if openshift.common.deployment_type in ['openshift-enterprise','atomic-enterprise'] %}
-See https://docs.openshift.com/enterprise/latest/cli_reference/get_started_cli.html
-{% else %}
-See https://docs.openshift.org/latest/cli_reference/get_started_cli.html
-{% endif %}
-=================================================================================
-"""
-
-if [ -n "$image_tag" ]; then
- image_tag=":$image_tag"
-fi
-
-docker run -i --privileged --net=host --user=${user}:${group} -v ~/.kube:/root/.kube -v /tmp:/tmp -v {{ openshift.common.config_base}}:{{ openshift.common.config_base }} -e KUBECONFIG=/root/.kube/config --entrypoint ${cmd} --rm {{ openshift.common.cli_image }}${image_tag} "${@}"
diff --git a/roles/openshift_cluster_metrics/README.md b/roles/openshift_cluster_metrics/README.md
deleted file mode 100644
index 9fdfab8e3..000000000
--- a/roles/openshift_cluster_metrics/README.md
+++ /dev/null
@@ -1,36 +0,0 @@
-#openshift_cluster_metrics
-
-This role configures Cluster wide metrics. It does setting up three services:
-* Metrics are stored in InfluxDB for querying.
-* Heapster reads all nodes and pods from the master, then connects to eachs node's kubelet to retrieve pod metrics.
-* Grafan allows users to create dashboards of metrics from InfluxDB
-
-## Requirements
-
-Running OpenShift cluster
-
-## Role Variables
-
-```
-# Enable cluster metrics
-use_cluster_metrics=true
-```
-
-## Dependencies
-
-None
-
-## Example Playbook
-
-TODO
-
-## Security Note
-Opening up the read-only port exposes information about the running pods (such as namespace, pod name, labels, etc.) to unauthenticated clients. The requirement to open up this read-only port will be fixed in future versions.
-
-##License
-
-Apache License, Version 2.0
-
-## Author Information
-
-Diego Castro (diego.castro@getupcloud.com)
diff --git a/roles/openshift_cluster_metrics/files/cluster-metrics/grafana.yaml b/roles/openshift_cluster_metrics/files/cluster-metrics/grafana.yaml
deleted file mode 100644
index bff422efc..000000000
--- a/roles/openshift_cluster_metrics/files/cluster-metrics/grafana.yaml
+++ /dev/null
@@ -1,53 +0,0 @@
-apiVersion: "v1"
-kind: "List"
-items:
- -
- apiVersion: "v1"
- kind: "Service"
- metadata:
- labels:
- provider: "fabric8"
- component: "grafana"
- name: "grafana"
- spec:
- ports:
- -
- port: 80
- targetPort: "http"
- selector:
- provider: "fabric8"
- component: "grafana"
- -
- apiVersion: "v1"
- kind: "ReplicationController"
- metadata:
- labels:
- provider: "fabric8"
- component: "grafana"
- name: "grafana"
- spec:
- replicas: 1
- selector:
- provider: "fabric8"
- component: "grafana"
- template:
- metadata:
- labels:
- provider: "fabric8"
- component: "grafana"
- spec:
- containers:
- -
- env:
- -
- name: "INFLUXDB_SERVICE_NAME"
- value: "INFLUXDB_MONITORING"
- -
- name: "GRAFANA_DEFAULT_DASHBOARD"
- value: "/dashboard/file/kubernetes.json"
- image: "fabric8/grafana:1.9.1_2"
- name: "grafana"
- ports:
- -
- containerPort: 3000
- name: "http" \ No newline at end of file
diff --git a/roles/openshift_cluster_metrics/files/cluster-metrics/heapster-serviceaccount.yaml b/roles/openshift_cluster_metrics/files/cluster-metrics/heapster-serviceaccount.yaml
deleted file mode 100644
index 1de2ad699..000000000
--- a/roles/openshift_cluster_metrics/files/cluster-metrics/heapster-serviceaccount.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: heapster \ No newline at end of file
diff --git a/roles/openshift_cluster_metrics/files/cluster-metrics/heapster.yaml b/roles/openshift_cluster_metrics/files/cluster-metrics/heapster.yaml
deleted file mode 100644
index 83e314074..000000000
--- a/roles/openshift_cluster_metrics/files/cluster-metrics/heapster.yaml
+++ /dev/null
@@ -1,30 +0,0 @@
-apiVersion: "v1"
-kind: "List"
-items:
- -
- apiVersion: "v1"
- kind: "ReplicationController"
- metadata:
- labels:
- provider: "fabric8"
- component: "heapster"
- name: "heapster"
- spec:
- replicas: 1
- selector:
- provider: "fabric8"
- component: "heapster"
- template:
- metadata:
- labels:
- provider: "fabric8"
- component: "heapster"
- spec:
- containers:
- -
- args:
- - "-source=kubernetes:https://kubernetes.default.svc.cluster.local?auth=&insecure=true&useServiceAccount=true"
- - "-sink=influxdb:http://influxdb-monitoring.default.svc.cluster.local:8086"
- image: "kubernetes/heapster:V0.14.2"
- name: "heapster"
- serviceAccount: "heapster" \ No newline at end of file
diff --git a/roles/openshift_cluster_metrics/files/cluster-metrics/influxdb.yaml b/roles/openshift_cluster_metrics/files/cluster-metrics/influxdb.yaml
deleted file mode 100644
index 6f67c3d7c..000000000
--- a/roles/openshift_cluster_metrics/files/cluster-metrics/influxdb.yaml
+++ /dev/null
@@ -1,67 +0,0 @@
-apiVersion: "v1"
-kind: "List"
-items:
- -
- apiVersion: "v1"
- kind: "Service"
- metadata:
- labels:
- provider: "fabric8"
- component: "influxdb-monitoring"
- name: "influxdb-monitoring"
- spec:
- ports:
- -
- port: 8086
- targetPort: "http"
- selector:
- provider: "fabric8"
- component: "influxdb-monitoring"
- -
- apiVersion: "v1"
- kind: "ReplicationController"
- metadata:
- labels:
- provider: "fabric8"
- component: "influxdb-monitoring"
- name: "influxdb-monitoring"
- spec:
- replicas: 1
- selector:
- provider: "fabric8"
- component: "influxdb-monitoring"
- template:
- metadata:
- labels:
- provider: "fabric8"
- component: "influxdb-monitoring"
- spec:
- containers:
- -
- env:
- -
- name: "PRE_CREATE_DB"
- value: "k8s;grafana"
- image: "fabric8/influxdb:0.8.8"
- name: "influxdb"
- ports:
- -
- containerPort: 8090
- name: "raft"
- -
- containerPort: 8099
- name: "protobuf"
- -
- containerPort: 8083
- name: "admin"
- -
- containerPort: 8086
- name: "http"
- volumeMounts:
- -
- mountPath: "/data"
- name: "influxdb-data"
- volumes:
- -
- emptyDir:
- name: "influxdb-data" \ No newline at end of file
diff --git a/roles/openshift_cluster_metrics/tasks/main.yml b/roles/openshift_cluster_metrics/tasks/main.yml
deleted file mode 100644
index 1fc8a074a..000000000
--- a/roles/openshift_cluster_metrics/tasks/main.yml
+++ /dev/null
@@ -1,49 +0,0 @@
----
-
-- name: Install cluster metrics templates
- copy:
- src: cluster-metrics
- dest: /etc/origin/
-
-- name: Create InfluxDB Services
- command: >
- {{ openshift.common.client_binary }} create -f
- /etc/origin/cluster-metrics/influxdb.yaml
- register: oex_influxdb_services
- failed_when: "'already exists' not in oex_influxdb_services.stderr and oex_influxdb_services.rc != 0"
- changed_when: false
-
-- name: Create Heapster Service Account
- command: >
- {{ openshift.common.client_binary }} create -f
- /etc/origin/cluster-metrics/heapster-serviceaccount.yaml
- register: oex_heapster_serviceaccount
- failed_when: "'already exists' not in oex_heapster_serviceaccount.stderr and oex_heapster_serviceaccount.rc != 0"
- changed_when: false
-
-- name: Add cluster-reader role to Heapster
- command: >
- {{ openshift.common.admin_binary }} policy
- add-cluster-role-to-user
- cluster-reader
- system:serviceaccount:default:heapster
- register: oex_cluster_header_role
- failed_when: "'already exists' not in oex_cluster_header_role.stderr and oex_cluster_header_role.rc != 0"
- changed_when: false
-
-- name: Create Heapster Services
- command: >
- {{ openshift.common.client_binary }} create -f
- /etc/origin/cluster-metrics/heapster.yaml
- register: oex_heapster_services
- failed_when: "'already exists' not in oex_heapster_services.stderr and oex_heapster_services.rc != 0"
- changed_when: false
-
-- name: Create Grafana Services
- command: >
- {{ openshift.common.client_binary }} create -f
- /etc/origin/cluster-metrics/grafana.yaml
- register: oex_grafana_services
- failed_when: "'already exists' not in oex_grafana_services.stderr and oex_grafana_services.rc != 0"
- changed_when: false
-
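The deleted tasks above all rely on one idempotent-create idiom: register the command result and treat an "already exists" error as success. A minimal sketch of that idiom, assuming a hypothetical manifest path and a bare `oc` client on the PATH (both illustration-only):

```
---
# Minimal sketch of the "create unless it already exists" pattern used
# by the removed cluster-metrics tasks. The manifest path and the bare
# `oc` binary are assumptions for illustration.
- name: Create resources from a manifest, tolerating pre-existing objects
  command: oc create -f /etc/origin/example/manifest.yaml
  register: create_result
  # Fail only on real errors: a non-zero rc whose stderr does not
  # indicate the object already exists.
  failed_when: "'already exists' not in create_result.stderr and create_result.rc != 0"
  # `oc create` is not idempotent, so never report "changed".
  changed_when: false
```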
diff --git a/roles/openshift_examples/README.md b/roles/openshift_examples/README.md
index 6ddbe7017..8cc479c73 100644
--- a/roles/openshift_examples/README.md
+++ b/roles/openshift_examples/README.md
@@ -25,7 +25,7 @@ Role Variables
|-------------------------------------|-----------------------------------------------------|------------------------------------------|
 | openshift_examples_load_centos | true when openshift_deployment_type not 'enterprise' | Load centos image streams |
| openshift_examples_load_rhel | true if openshift_deployment_type is 'enterprise' | Load rhel image streams |
-| openshift_examples_load_db_templates| true | Loads databcase templates |
+| openshift_examples_load_db_templates| true | Loads database templates |
 | openshift_examples_load_quickstarts | true | Loads quickstarts, e.g. nodejs, rails, etc. |
 | openshift_examples_load_xpaas | false | Loads xpaas streams and templates |
diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh
index 7b4a8440e..ef2da946a 100755
--- a/roles/openshift_examples/examples-sync.sh
+++ b/roles/openshift_examples/examples-sync.sh
@@ -29,6 +29,7 @@ unzip cakephp-ex-master.zip
unzip application-templates-master.zip
cp origin-master/examples/db-templates/* ${EXAMPLES_BASE}/db-templates/
cp origin-master/examples/jenkins/jenkins-*template.json ${EXAMPLES_BASE}/quickstart-templates/
+cp origin-master/examples/jenkins/pipeline/jenkinstemplate.json ${EXAMPLES_BASE}/quickstart-templates/
cp origin-master/examples/image-streams/* ${EXAMPLES_BASE}/image-streams/
cp django-ex-master/openshift/templates/* ${EXAMPLES_BASE}/quickstart-templates/
cp rails-ex-master/openshift/templates/* ${EXAMPLES_BASE}/quickstart-templates/
diff --git a/roles/openshift_examples/files/examples/v1.3/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/v1.3/image-streams/image-streams-centos7.json
index d971e5e7a..8aedf80fe 100644
--- a/roles/openshift_examples/files/examples/v1.3/image-streams/image-streams-centos7.json
+++ b/roles/openshift_examples/files/examples/v1.3/image-streams/image-streams-centos7.json
@@ -92,7 +92,7 @@
},
"from": {
"kind": "ImageStreamTag",
- "name": "0.10"
+ "name": "4"
}
},
{
@@ -109,6 +109,21 @@
"kind": "DockerImage",
"name": "openshift/nodejs-010-centos7:latest"
}
+ },
+ {
+ "name": "4",
+ "annotations": {
+ "description": "Build and run NodeJS 4 applications",
+ "iconClass": "icon-nodejs",
+ "tags": "builder,nodejs",
+ "supports":"nodejs:4,nodejs",
+ "version": "4",
+ "sampleRepo": "https://github.com/openshift/nodejs-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/nodejs-4-centos7:latest"
+ }
}
]
}
diff --git a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/dancer-mysql.json
index bc9c8e8fd..cc7920b7d 100644
--- a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/dancer-mysql.json
+++ b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/dancer-mysql.json
@@ -207,9 +207,9 @@
}
],
"resources": {
- "limits": {
- "memory": "${MEMORY_LIMIT}"
- }
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
}
}
]
diff --git a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/django-postgresql.json
index 0b7fd7cab..7d1dea11b 100644
--- a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/django-postgresql.json
+++ b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/django-postgresql.json
@@ -83,7 +83,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${NAMESPACE}",
- "name": "python:3.4"
+ "name": "python:3.5"
},
"env": [
{
@@ -273,7 +273,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${NAMESPACE}",
- "name": "postgresql:9.4"
+ "name": "postgresql:9.5"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/django.json b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/django.json
index 9e84e27e1..1c2e40d70 100644
--- a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/django.json
+++ b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/django.json
@@ -83,7 +83,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${NAMESPACE}",
- "name": "python:3.4"
+ "name": "python:3.5"
},
"env": [
{
diff --git a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkinstemplate.json b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkinstemplate.json
new file mode 100644
index 000000000..325663313
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/jenkinstemplate.json
@@ -0,0 +1,255 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jenkins",
+ "creationTimestamp": null,
+ "annotations": {
+ "description": "Jenkins service, without persistent storage. WARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
+ "iconClass": "icon-jenkins",
+ "tags": "instant-app,jenkins"
+ }
+ },
+ "objects": [
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jenkins",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "to": {
+ "kind": "Service",
+ "name": "${JENKINS_SERVICE_NAME}"
+ },
+ "tls": {
+ "termination": "edge",
+ "insecureEdgeTerminationPolicy": "Redirect",
+ "certificate": "-----BEGIN CERTIFICATE-----\nMIIDIjCCAgqgAwIBAgIBATANBgkqhkiG9w0BAQUFADCBoTELMAkGA1UEBhMCVVMx\nCzAJBgNVBAgMAlNDMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkxHDAaBgNVBAoME0Rl\nZmF1bHQgQ29tcGFueSBMdGQxEDAOBgNVBAsMB1Rlc3QgQ0ExGjAYBgNVBAMMEXd3\ndy5leGFtcGxlY2EuY29tMSIwIAYJKoZIhvcNAQkBFhNleGFtcGxlQGV4YW1wbGUu\nY29tMB4XDTE1MDExMjE0MTk0MVoXDTE2MDExMjE0MTk0MVowfDEYMBYGA1UEAwwP\nd3d3LmV4YW1wbGUuY29tMQswCQYDVQQIDAJTQzELMAkGA1UEBhMCVVMxIjAgBgkq\nhkiG9w0BCQEWE2V4YW1wbGVAZXhhbXBsZS5jb20xEDAOBgNVBAoMB0V4YW1wbGUx\nEDAOBgNVBAsMB0V4YW1wbGUwgZ8wDQYJKoZIhvcNAQEBBQADgY0AMIGJAoGBAMrv\ngu6ZTTefNN7jjiZbS/xvQjyXjYMN7oVXv76jbX8gjMOmg9m0xoVZZFAE4XyQDuCm\n47VRx5Qrf/YLXmB2VtCFvB0AhXr5zSeWzPwaAPrjA4ebG+LUo24ziS8KqNxrFs1M\nmNrQUgZyQC6XIe1JHXc9t+JlL5UZyZQC1IfaJulDAgMBAAGjDTALMAkGA1UdEwQC\nMAAwDQYJKoZIhvcNAQEFBQADggEBAFCi7ZlkMnESvzlZCvv82Pq6S46AAOTPXdFd\nTMvrh12E1sdVALF1P1oYFJzG1EiZ5ezOx88fEDTW+Lxb9anw5/KJzwtWcfsupf1m\nV7J0D3qKzw5C1wjzYHh9/Pz7B1D0KthQRATQCfNf8s6bbFLaw/dmiIUhHLtIH5Qc\nyfrejTZbOSP77z8NOWir+BWWgIDDB2//3AkDIQvT20vmkZRhkqSdT7et4NmXOX/j\njhPti4b2Fie0LeuvgaOdKjCpQQNrYthZHXeVlOLRhMTSk3qUczenkKTOhvP7IS9q\n+Dzv5hqgSfvMG392KWh5f8xXfJNs4W5KLbZyl901MeReiLrPH3w=\n-----END CERTIFICATE-----",
+ "key": "-----BEGIN PRIVATE KEY-----\nMIICeAIBADANBgkqhkiG9w0BAQEFAASCAmIwggJeAgEAAoGBAMrvgu6ZTTefNN7j\njiZbS/xvQjyXjYMN7oVXv76jbX8gjMOmg9m0xoVZZFAE4XyQDuCm47VRx5Qrf/YL\nXmB2VtCFvB0AhXr5zSeWzPwaAPrjA4ebG+LUo24ziS8KqNxrFs1MmNrQUgZyQC6X\nIe1JHXc9t+JlL5UZyZQC1IfaJulDAgMBAAECgYEAnxOjEj/vrLNLMZE1Q9H7PZVF\nWdP/JQVNvQ7tCpZ3ZdjxHwkvf//aQnuxS5yX2Rnf37BS/TZu+TIkK4373CfHomSx\nUTAn2FsLmOJljupgGcoeLx5K5nu7B7rY5L1NHvdpxZ4YjeISrRtEPvRakllENU5y\ngJE8c2eQOx08ZSRE4TkCQQD7dws2/FldqwdjJucYijsJVuUdoTqxP8gWL6bB251q\nelP2/a6W2elqOcWId28560jG9ZS3cuKvnmu/4LG88vZFAkEAzphrH3673oTsHN+d\nuBd5uyrlnGjWjuiMKv2TPITZcWBjB8nJDSvLneHF59MYwejNNEof2tRjgFSdImFH\nmi995wJBAMtPjW6wiqRz0i41VuT9ZgwACJBzOdvzQJfHgSD9qgFb1CU/J/hpSRIM\nkYvrXK9MbvQFvG6x4VuyT1W8mpe1LK0CQAo8VPpffhFdRpF7psXLK/XQ/0VLkG3O\nKburipLyBg/u9ZkaL0Ley5zL5dFBjTV2Qkx367Ic2b0u9AYTCcgi2DsCQQD3zZ7B\nv7BOm7MkylKokY2MduFFXU0Bxg6pfZ7q3rvg8gqhUFbaMStPRYg6myiDiW/JfLhF\nTcFT4touIo7oriFJ\n-----END PRIVATE KEY-----",
+ "caCertificate": "-----BEGIN CERTIFICATE-----\nMIIEFzCCAv+gAwIBAgIJALK1iUpF2VQLMA0GCSqGSIb3DQEBBQUAMIGhMQswCQYD\nVQQGEwJVUzELMAkGA1UECAwCU0MxFTATBgNVBAcMDERlZmF1bHQgQ2l0eTEcMBoG\nA1UECgwTRGVmYXVsdCBDb21wYW55IEx0ZDEQMA4GA1UECwwHVGVzdCBDQTEaMBgG\nA1UEAwwRd3d3LmV4YW1wbGVjYS5jb20xIjAgBgkqhkiG9w0BCQEWE2V4YW1wbGVA\nZXhhbXBsZS5jb20wHhcNMTUwMTEyMTQxNTAxWhcNMjUwMTA5MTQxNTAxWjCBoTEL\nMAkGA1UEBhMCVVMxCzAJBgNVBAgMAlNDMRUwEwYDVQQHDAxEZWZhdWx0IENpdHkx\nHDAaBgNVBAoME0RlZmF1bHQgQ29tcGFueSBMdGQxEDAOBgNVBAsMB1Rlc3QgQ0Ex\nGjAYBgNVBAMMEXd3dy5leGFtcGxlY2EuY29tMSIwIAYJKoZIhvcNAQkBFhNleGFt\ncGxlQGV4YW1wbGUuY29tMIIBIjANBgkqhkiG9w0BAQEFAAOCAQ8AMIIBCgKCAQEA\nw2rK1J2NMtQj0KDug7g7HRKl5jbf0QMkMKyTU1fBtZ0cCzvsF4CqV11LK4BSVWaK\nrzkaXe99IVJnH8KdOlDl5Dh/+cJ3xdkClSyeUT4zgb6CCBqg78ePp+nN11JKuJlV\nIG1qdJpB1J5O/kCLsGcTf7RS74MtqMFo96446Zvt7YaBhWPz6gDaO/TUzfrNcGLA\nEfHVXkvVWqb3gqXUztZyVex/gtP9FXQ7gxTvJml7UkmT0VAFjtZnCqmFxpLZFZ15\n+qP9O7Q2MpsGUO/4vDAuYrKBeg1ZdPSi8gwqUP2qWsGd9MIWRv3thI2903BczDc7\nr8WaIbm37vYZAS9G56E4+wIDAQABo1AwTjAdBgNVHQ4EFgQUugLrSJshOBk5TSsU\nANs4+SmJUGwwHwYDVR0jBBgwFoAUugLrSJshOBk5TSsUANs4+SmJUGwwDAYDVR0T\nBAUwAwEB/zANBgkqhkiG9w0BAQUFAAOCAQEAaMJ33zAMV4korHo5aPfayV3uHoYZ\n1ChzP3eSsF+FjoscpoNSKs91ZXZF6LquzoNezbfiihK4PYqgwVD2+O0/Ty7UjN4S\nqzFKVR4OS/6lCJ8YncxoFpTntbvjgojf1DEataKFUN196PAANc3yz8cWHF4uvjPv\nWkgFqbIjb+7D1YgglNyovXkRDlRZl0LD1OQ0ZWhd4Ge1qx8mmmanoBeYZ9+DgpFC\nj9tQAbS867yeOryNe7sEOIpXAAqK/DTu0hB6+ySsDfMo4piXCc2aA/eI2DCuw08e\nw17Dz9WnupZjVdwTKzDhFgJZMLDqn37HQnT6EemLFqbcR0VPEnfyhDtZIQ==\n-----END CERTIFICATE-----"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${JENKINS_SERVICE_NAME}",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "jenkins"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "jenkins:1",
+ "namespace": "openshift"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "name": "${JENKINS_SERVICE_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "creationTimestamp": null,
+ "labels": {
+ "name": "${JENKINS_SERVICE_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${JENKINS_SERVICE_NAME}",
+ "containers": [
+ {
+ "name": "jenkins",
+ "image": " ",
+ "readinessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 3,
+ "httpGet": {
+ "path": "/login",
+ "port": 8080
+ }
+ },
+ "livenessProbe": {
+ "timeoutSeconds": 3,
+ "initialDelaySeconds": 120,
+ "httpGet": {
+ "path": "/login",
+ "port": 8080
+ }
+ },
+ "env": [
+ {
+ "name": "JENKINS_PASSWORD",
+ "value": "${JENKINS_PASSWORD}"
+ },
+ {
+ "name": "KUBERNETES_MASTER",
+ "value": "https://kubernetes.default:443"
+ },
+ {
+ "name": "KUBERNETES_TRUST_CERTIFICATES",
+ "value": "true"
+ }
+ ],
+ "resources": {
+ "limits": {
+ "memory": "${MEMORY_LIMIT}"
+ }
+ },
+ "volumeMounts": [
+ {
+ "name": "${JENKINS_SERVICE_NAME}-data",
+ "mountPath": "/var/lib/jenkins"
+ }
+ ],
+ "terminationMessagePath": "/dev/termination-log",
+ "imagePullPolicy": "IfNotPresent",
+ "capabilities": {},
+ "securityContext": {
+ "capabilities": {},
+ "privileged": false
+ }
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${JENKINS_SERVICE_NAME}-data",
+ "emptyDir": {
+ "medium": ""
+ }
+ }
+ ],
+ "restartPolicy": "Always",
+ "dnsPolicy": "ClusterFirst"
+ }
+ }
+ }
+ },
+ {
+ "kind": "ServiceAccount",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${JENKINS_SERVICE_NAME}"
+ }
+ },
+ {
+ "kind": "RoleBinding",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${JENKINS_SERVICE_NAME}_edit"
+ },
+ "groupNames": null,
+ "subjects": [
+ {
+ "kind": "ServiceAccount",
+ "name": "${JENKINS_SERVICE_NAME}"
+ }
+ ],
+ "roleRef": {
+ "name": "edit"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "jenkins-jnlp",
+ "creationTimestamp": null
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "agent",
+ "protocol": "TCP",
+ "port": 50000,
+ "targetPort": 50000,
+ "nodePort": 0
+ }
+ ],
+ "selector": {
+ "name": "${JENKINS_SERVICE_NAME}"
+ },
+ "portalIP": "",
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${JENKINS_SERVICE_NAME}",
+ "annotations": {
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"jenkins-jnlp\", \"namespace\": \"\", \"kind\": \"Service\"}]",
+ "service.openshift.io/infrastructure": "true"
+ },
+ "creationTimestamp": null
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "web",
+ "protocol": "TCP",
+ "port": 80,
+ "targetPort": 8080,
+ "nodePort": 0
+ }
+ ],
+ "selector": {
+ "name": "${JENKINS_SERVICE_NAME}"
+ },
+ "portalIP": "",
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ }
+ }
+ ],
+ "parameters": [
+ {
+ "name": "MEMORY_LIMIT",
+ "displayName": "Memory Limit",
+ "description": "Maximum amount of memory the container can use.",
+ "value": "512Mi"
+ },
+ {
+ "name": "NAMESPACE",
+ "displayName": "Namespace",
+ "description": "The OpenShift Namespace where the ImageStream resides.",
+ "value": "openshift"
+ },
+ {
+ "name": "JENKINS_SERVICE_NAME",
+ "displayName": "Jenkins Service Name",
+ "description": "The name of the OpenShift Service exposed for the Jenkins container.",
+ "value": "jenkins"
+ },
+ {
+ "name": "JENKINS_PASSWORD",
+ "displayName": "Jenkins Password",
+ "description": "Password for the Jenkins user.",
+ "generate": "expression",
+ "value": "password"
+ }
+ ],
+ "labels": {
+ "template": "jenkins-ephemeral-template"
+ }
+}
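For context, the new template is consumed like the other quickstart templates in this repository. A hypothetical task using the repo's `openshift.common.client_binary` fact — the on-disk template path and the parameter values are assumptions, not part of the commit:

```
- name: Instantiate the ephemeral Jenkins template (hypothetical example)
  command: >
    {{ openshift.common.client_binary }} new-app
    --file=/etc/origin/examples/jenkinstemplate.json
    --param=JENKINS_SERVICE_NAME=jenkins
    --param=MEMORY_LIMIT=512Mi
  register: jenkins_create
  # Tolerate re-runs against a project where the objects already exist.
  failed_when: "'already exists' not in jenkins_create.stderr and jenkins_create.rc != 0"
  changed_when: false
```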
diff --git a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/nodejs-mongodb.json
index b2b9f2478..6ab4a1781 100644
--- a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/nodejs-mongodb.json
+++ b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/nodejs-mongodb.json
@@ -83,7 +83,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${NAMESPACE}",
- "name": "nodejs:0.10"
+ "name": "nodejs:4"
},
"env": [
{
@@ -271,7 +271,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${NAMESPACE}",
- "name": "mongodb:2.6"
+ "name": "mongodb:3.2"
}
}
},
@@ -322,7 +322,7 @@
"timeoutSeconds": 1,
"initialDelaySeconds": 3,
"exec": {
- "command": [ "/bin/sh", "-i", "-c", "mongostat --host 127.0.0.1 -u admin -p ${DATABASE_ADMIN_PASSWORD} -n 1 --noheaders"]
+ "command": [ "/bin/sh", "-i", "-c", "mongo 127.0.0.1:27017/$MONGODB_DATABASE -u $MONGODB_USER -p $MONGODB_PASSWORD --eval=\"quit()\""]
}
},
"livenessProbe": {
diff --git a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/nodejs.json b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/nodejs.json
index 08c7d3106..ec262e4e8 100644
--- a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/nodejs.json
+++ b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/nodejs.json
@@ -83,7 +83,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${NAMESPACE}",
- "name": "nodejs:0.10"
+ "name": "nodejs:4"
},
"env": [
{
diff --git a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/rails-postgresql.json b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/rails-postgresql.json
index e64e2feeb..50d60f2bb 100644
--- a/roles/openshift_examples/files/examples/v1.3/quickstart-templates/rails-postgresql.json
+++ b/roles/openshift_examples/files/examples/v1.3/quickstart-templates/rails-postgresql.json
@@ -83,7 +83,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${NAMESPACE}",
- "name": "ruby:2.2"
+ "name": "ruby:2.3"
},
"env": [
{
@@ -300,7 +300,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${NAMESPACE}",
- "name": "postgresql:9.4"
+ "name": "postgresql:9.5"
}
}
},
diff --git a/roles/openshift_examples/tasks/main.yml b/roles/openshift_examples/tasks/main.yml
index 7ea39f51e..8d2248578 100644
--- a/roles/openshift_examples/tasks/main.yml
+++ b/roles/openshift_examples/tasks/main.yml
@@ -1,9 +1,46 @@
---
-- name: Copy openshift examples
- copy:
- src: "examples/{{ content_version }}/"
+######################################################################
+# Copying Examples
+#
+# We used to use the copy module to transfer the openshift examples to
+# the remote. Then it started taking more than a minute to transfer
+# the files. As noted in the module:
+#
+# "The 'copy' module recursively copy facility does not scale to
+# lots (>hundreds) of files."
+#
+# The `synchronize` module is suggested as an alternative, but we
+# can't use it either, due to changes introduced in Ansible 2.x.
+- name: Create local temp dir for OpenShift examples copy
+ local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
+ become: False
+ register: copy_examples_mktemp
+ run_once: True
+
+- name: Create tar of OpenShift examples
+ local_action: command tar -C "{{ role_path }}/files/examples/{{ content_version }}/" -cvf "{{ copy_examples_mktemp.stdout }}/openshift-examples.tar" .
+ become: False
+ register: copy_examples_tar
+
+- name: Create the remote OpenShift examples directory
+ file:
+ dest: "{{ examples_base }}"
+ state: directory
+ mode: 0755
+
+- name: Unarchive the OpenShift examples on the remote
+ unarchive:
+ src: "{{ copy_examples_mktemp.stdout }}/openshift-examples.tar"
dest: "{{ examples_base }}/"
+- name: Cleanup the OpenShift Examples temp dir
+ become: False
+ local_action: file dest="{{ copy_examples_mktemp.stdout }}" state=absent
+
+# Done copying examples
+######################################################################
+# Begin image streams
+
- name: Modify registry paths if registry_url is not registry.access.redhat.com
shell: >
find {{ examples_base }} -type f | xargs -n 1 sed -i 's|registry.access.redhat.com|{{ registry_host | quote }}|g'
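The comment block above explains the motivation; the same local-tar-plus-unarchive transfer works anywhere a large directory tree has to reach the remote. A condensed, self-contained sketch, where `/srv/examples` and `/etc/origin/examples` are placeholder paths:

```
---
# Condensed sketch of the tar-and-unarchive transfer pattern described
# above. The source and destination paths are placeholders.
- name: Create a local staging directory
  local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
  become: False
  register: staging_dir
  run_once: True

- name: Archive the files on the control machine
  local_action: command tar -C /srv/examples -cf "{{ staging_dir.stdout }}/payload.tar" .
  become: False

- name: Ensure the destination directory exists on the remote
  file:
    dest: /etc/origin/examples
    state: directory
    mode: 0755

- name: Push and unpack the archive in one step
  unarchive:
    src: "{{ staging_dir.stdout }}/payload.tar"
    dest: /etc/origin/examples/

- name: Remove the local staging directory
  local_action: file dest="{{ staging_dir.stdout }}" state=absent
  become: False
```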
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 54bdbc775..659f4eba6 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -480,23 +480,6 @@ def set_selectors(facts):
return facts
-def set_metrics_facts_if_unset(facts):
- """ Set cluster metrics facts if not already present in facts dict
- dict: the facts dict updated with the generated cluster metrics facts if
- missing
- Args:
- facts (dict): existing facts
- Returns:
- dict: the facts dict updated with the generated cluster metrics
- facts if they were not already present
-
- """
- if 'common' in facts:
- if 'use_cluster_metrics' not in facts['common']:
- use_cluster_metrics = False
- facts['common']['use_cluster_metrics'] = use_cluster_metrics
- return facts
-
def set_dnsmasq_facts_if_unset(facts):
""" Set dnsmasq facts if not already present in facts
Args:
@@ -1674,7 +1657,6 @@ class OpenShiftFacts(object):
facts = set_nuage_facts_if_unset(facts)
facts = set_node_schedulability(facts)
facts = set_selectors(facts)
- facts = set_metrics_facts_if_unset(facts)
facts = set_identity_providers_if_unset(facts)
facts = set_sdn_facts_if_unset(facts, self.system_facts)
facts = set_deployment_facts_if_unset(facts)
diff --git a/roles/openshift_facts/tasks/main.yml b/roles/openshift_facts/tasks/main.yml
index c3723672d..4dbbd7f45 100644
--- a/roles/openshift_facts/tasks/main.yml
+++ b/roles/openshift_facts/tasks/main.yml
@@ -1,9 +1,4 @@
---
-- name: Verify Ansible version is greater than or equal to 1.9.4
- fail:
- msg: "Unsupported ansible version: {{ ansible_version }} found"
- when: not ansible_version.full | version_compare('1.9.4', 'ge')
-
- name: Detecting Operating System
stat:
path: /run/ostree-booted
diff --git a/roles/openshift_hosted/tasks/registry/registry.yml b/roles/openshift_hosted/tasks/registry/registry.yml
index a242ce30f..a1edef132 100644
--- a/roles/openshift_hosted/tasks/registry/registry.yml
+++ b/roles/openshift_hosted/tasks/registry/registry.yml
@@ -9,7 +9,7 @@
when: openshift.hosted.registry.replicas | default(none) is none
- set_fact:
- replicas: "{{ openshift.hosted.registry.replicas | default(((openshift_hosted_registry_nodes_json.stdout | from_json)['items'] | length) if openshift.hosted.registry.storage.kind | default(none) is not none else 1) }}"
+ replicas: "{{ openshift.hosted.registry.replicas | default(((openshift_hosted_registry_nodes_json.stdout | default('{\"items\":[]}') | from_json)['items'] | length) if openshift.hosted.registry.storage.kind | default(none) is not none else 1) }}"
- name: Create OpenShift registry
command: >
diff --git a/roles/openshift_hosted/tasks/registry/storage/object_storage.yml b/roles/openshift_hosted/tasks/registry/storage/object_storage.yml
index 9db67ecc6..7b1b3f6ff 100644
--- a/roles/openshift_hosted/tasks/registry/storage/object_storage.yml
+++ b/roles/openshift_hosted/tasks/registry/storage/object_storage.yml
@@ -24,10 +24,10 @@
failed_when: false
- set_fact:
- registry_config: "{{ lookup('template', '../templates/registry_config.j2') | b64encode }}"
+ registry_config: "{{ lookup('template', 'registry_config.j2') | b64encode }}"
- set_fact:
- registry_config_secret: "{{ lookup('template', '../templates/registry_config_secret.j2') | from_yaml }}"
+ registry_config_secret: "{{ lookup('template', 'registry_config_secret.j2') | from_yaml }}"
- set_fact:
same_storage_provider: "{{ (secrets.stdout|from_json)['metadata']['annotations']['provider'] | default(none) == openshift.hosted.registry.storage.provider }}"
@@ -111,4 +111,4 @@
--config={{ openshift_hosted_kubeconfig }}
--namespace={{ openshift.hosted.registry.namespace | default('default') }}
deploy dc/docker-registry --latest
- when: secrets.rc == 0 and update_config_secret.rc == 0 and same_storage_provider | bool
+ when: secrets.rc == 0 and not update_config_secret | skipped and update_config_secret.rc == 0 and same_storage_provider | bool
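The amended `when` guards a registered variable that may belong to a skipped task: `update_config_secret.rc` only exists when the task actually ran. A minimal sketch of the guard, with hypothetical task bodies and condition:

```
---
# Minimal sketch: safely referencing the result of a conditionally
# skipped task. The task names and condition are hypothetical.
- name: Maybe update a secret
  command: /bin/true
  register: update_config_secret
  when: some_condition | default(false) | bool

- name: Act only when the update ran and succeeded
  command: /bin/true
  # `| skipped` is checked first so that `.rc` is never evaluated on a
  # skipped result, which would raise an undefined-attribute error.
  when: not update_config_secret | skipped and update_config_secret.rc == 0
```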
diff --git a/roles/openshift_hosted/tasks/registry/storage/registry_config.j2 b/roles/openshift_hosted/tasks/registry/storage/registry_config.j2
new file mode 120000
index 000000000..f3e82ad4f
--- /dev/null
+++ b/roles/openshift_hosted/tasks/registry/storage/registry_config.j2
@@ -0,0 +1 @@
+../../../templates/registry_config.j2 \ No newline at end of file
diff --git a/roles/openshift_hosted/tasks/registry/storage/registry_config_secret.j2 b/roles/openshift_hosted/tasks/registry/storage/registry_config_secret.j2
new file mode 120000
index 000000000..b9e82c1ea
--- /dev/null
+++ b/roles/openshift_hosted/tasks/registry/storage/registry_config_secret.j2
@@ -0,0 +1 @@
+../../../templates/registry_config_secret.j2 \ No newline at end of file
diff --git a/roles/openshift_hosted/tasks/router/router.yml b/roles/openshift_hosted/tasks/router/router.yml
index c011db762..7f3731c7d 100644
--- a/roles/openshift_hosted/tasks/router/router.yml
+++ b/roles/openshift_hosted/tasks/router/router.yml
@@ -9,10 +9,15 @@
module: slurp
src: "{{ item }}"
register: openshift_router_certificate_output
+ # Defaulting dictionary keys to none to avoid deprecation warnings
+ # (future fatal errors) during template evaluation. Dictionary keys
+ # won't be accessed unless openshift_hosted_router_certificate is
+  # defined and has all of the keys (certfile, keyfile, cafile),
+  # which we check above.
with_items:
- - "{{ openshift_hosted_router_certificate.certfile }}"
- - "{{ openshift_hosted_router_certificate.keyfile }}"
- - "{{ openshift_hosted_router_certificate.cafile }}"
+ - "{{ (openshift_hosted_router_certificate | default({'certfile':none})).certfile }}"
+ - "{{ (openshift_hosted_router_certificate | default({'keyfile':none})).keyfile }}"
+ - "{{ (openshift_hosted_router_certificate | default({'cafile':none})).cafile }}"
when: openshift_hosted_router_certificate is defined
- name: Persist certificate contents
@@ -27,7 +32,7 @@
content: "{{ openshift.hosted.router.certificate.contents }}"
dest: "{{ openshift_master_config_dir }}/openshift-router.pem"
mode: 0600
- when: openshift.hosted.router.certificate | default(none) is not none
+ when: "'certificate' in openshift.hosted.router and 'contents' in openshift.hosted.router.certificate"
- name: Retrieve list of openshift nodes matching router selector
command: >
@@ -39,7 +44,7 @@
when: openshift.hosted.router.replicas | default(none) is none
- set_fact:
- replicas: "{{ openshift.hosted.router.replicas | default((openshift_hosted_router_nodes_json.stdout | from_json)['items'] | length) }}"
+ replicas: "{{ openshift.hosted.router.replicas | default((openshift_hosted_router_nodes_json.stdout | default('{\"items\":[]}') | from_json)['items'] | length) }}"
- name: Create OpenShift router
command: >
@@ -48,7 +53,7 @@
{% if replicas > 1 -%}
--replicas={{ replicas }}
{% endif -%}
- {% if openshift.hosted.router.certificate | default(none) is not none -%}
+ {% if 'certificate' in openshift.hosted.router and 'contents' in openshift.hosted.router.certificate -%}
--default-cert={{ openshift_master_config_dir }}/openshift-router.pem
{% endif -%}
--namespace={{ openshift.hosted.router.namespace | default('default') }}
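The defaulted dictionary lookups above work around Ansible 2.x templating `with_items` even when the task's `when` is false, so undefined dictionary keys must be defaulted away. Stripped down, with a hypothetical `my_certificate` variable:

```
---
# Sketch of defaulting dictionary keys so that item templates can be
# rendered even when the guarding `when` is false. `my_certificate`
# is a hypothetical inventory variable.
- name: Read certificate files when a certificate is configured
  slurp:
    src: "{{ item }}"
  with_items:
    # default({'certfile': none}) guarantees the key lookup succeeds
    # during templating; the task body never runs unless `when` holds.
    - "{{ (my_certificate | default({'certfile': none})).certfile }}"
    - "{{ (my_certificate | default({'keyfile': none})).keyfile }}"
  when: my_certificate is defined
```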
diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml
index 0a69b3eef..be70d9102 100644
--- a/roles/openshift_master/meta/main.yml
+++ b/roles/openshift_master/meta/main.yml
@@ -4,7 +4,7 @@ galaxy_info:
description: Master
company: Red Hat, Inc.
license: Apache License, Version 2.0
- min_ansible_version: 1.7
+ min_ansible_version: 2.1
platforms:
- name: EL
versions:
@@ -13,8 +13,7 @@ galaxy_info:
- cloud
dependencies:
- role: openshift_clock
-- role: openshift_docker
-- role: openshift_cli
+- role: openshift_master_certificates
- role: openshift_cloud_provider
- role: openshift_builddefaults
- role: openshift_master_facts
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 7a80ed8e3..0b87ae48c 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -30,6 +30,8 @@
- name: Pull master image
command: >
docker pull {{ openshift.master.master_image }}:{{ openshift_image_tag }}
+ register: pull_result
+ changed_when: "'Downloaded newer image' in pull_result.stdout"
when: openshift.common.is_containerized | bool
- name: Create openshift.common.data_dir
@@ -210,6 +212,7 @@
until: api_available_output.stdout == 'ok'
retries: 120
delay: 1
+ run_once: true
changed_when: false
when: openshift_master_ha | bool and openshift.master.cluster_method == 'native' and master_api_service_status_changed | bool
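The registered `pull_result` lets the pull task report change only when Docker actually downloaded layers. The pattern in isolation, with a placeholder image:

```
- name: Pull an image and report change only on a real download
  command: docker pull docker.io/library/busybox:latest
  register: pull_result
  # `docker pull` prints "Downloaded newer image" only when new layers
  # were fetched; an already-current image prints "Image is up to date".
  changed_when: "'Downloaded newer image' in pull_result.stdout"
```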
diff --git a/roles/openshift_master_ca/README.md b/roles/openshift_master_ca/README.md
deleted file mode 100644
index 5b2d3601b..000000000
--- a/roles/openshift_master_ca/README.md
+++ /dev/null
@@ -1,34 +0,0 @@
-OpenShift Master CA
-========================
-
-TODO
-
-Requirements
-------------
-
-TODO
-
-Role Variables
---------------
-
-TODO
-
-Dependencies
-------------
-
-TODO
-
-Example Playbook
-----------------
-
-TODO
-
-License
--------
-
-Apache License Version 2.0
-
-Author Information
-------------------
-
-Jason DeTiberus (jdetiber@redhat.com)
diff --git a/roles/openshift_master_ca/tasks/main.yml b/roles/openshift_master_ca/tasks/main.yml
deleted file mode 100644
index ae99467f0..000000000
--- a/roles/openshift_master_ca/tasks/main.yml
+++ /dev/null
@@ -1,24 +0,0 @@
----
-
-- name: Install the base package for admin tooling
- action: "{{ ansible_pkg_mgr }} name={{ openshift.common.service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }} state=present"
- when: not openshift.common.is_containerized | bool
- register: install_result
-
-- name: Reload generated facts
- openshift_facts:
- when: install_result | changed
-
-- name: Create openshift_master_config_dir if it doesn't exist
- file:
- path: "{{ openshift_master_config_dir }}"
- state: directory
-
-- name: Create the master certificates if they do not already exist
- command: >
- {{ openshift.common.admin_binary }} create-master-certs
- --hostnames={{ master_hostnames | join(',') }}
- --master={{ openshift.master.api_url }}
- --public-master={{ openshift.master.public_api_url }}
- --cert-dir={{ openshift_master_config_dir }} --overwrite=false
- when: master_certs_missing | bool
diff --git a/roles/openshift_master_ca/vars/main.yml b/roles/openshift_master_ca/vars/main.yml
deleted file mode 100644
index 1f6af808c..000000000
--- a/roles/openshift_master_ca/vars/main.yml
+++ /dev/null
@@ -1,5 +0,0 @@
----
-openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
-openshift_master_ca_cert: "{{ openshift_master_config_dir }}/ca.crt"
-openshift_master_ca_key: "{{ openshift_master_config_dir }}/ca.key"
-openshift_master_ca_serial: "{{ openshift_master_config_dir }}/ca.serial.txt"
diff --git a/roles/openshift_master_certificates/README.md b/roles/openshift_master_certificates/README.md
index ba3d5f28c..a80d47040 100644
--- a/roles/openshift_master_certificates/README.md
+++ b/roles/openshift_master_certificates/README.md
@@ -1,27 +1,44 @@
OpenShift Master Certificates
========================
-TODO
+This role determines whether OpenShift master certificates must be created, delegates certificate creation to the `openshift_ca_host`, and then deploys those certificates to the master hosts to which this role is applied. If this role is applied to the `openshift_ca_host` itself, certificate deployment is skipped.
Requirements
------------
-TODO
-
Role Variables
--------------
-TODO
+From `openshift_ca`:
+
+| Name | Default value | Description |
+|---------------------------------------|---------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|
+| openshift_ca_host | None (Required) | The hostname of the system where the OpenShift CA will be (or has been) created. |
+
+From this role:
+
+| Name | Default value | Description |
+|---------------------------------------|---------------------------------------------------------------------------|-------------------------------------------------------------------------------------------------------------------------------|
+| openshift_generated_configs_dir | `{{ openshift.common.config_base }}/generated-configs` | Directory in which per-master generated config directories will be created on the `openshift_ca_host`. |
+| openshift_master_cert_subdir | `master-{{ openshift.common.hostname }}` | Directory within `openshift_generated_configs_dir` where per-master configurations will be placed on the `openshift_ca_host`. |
+| openshift_master_config_dir | `{{ openshift.common.config_base }}/master` | Master configuration directory in which certificates will be deployed on masters. |
+| openshift_master_generated_config_dir | `{{ openshift_generated_configs_dir }}/{{ openshift_master_cert_subdir }}` | Full path to the per-master generated config directory. |
Dependencies
------------
-TODO
+* openshift_ca
Example Playbook
----------------
-TODO
+```
+- name: Create OpenShift Master Certificates
+ hosts: masters
+ roles:
+ - role: openshift_master_certificates
+ openshift_ca_host: master1.example.com
+```
License
-------
diff --git a/roles/openshift_master_certificates/meta/main.yml b/roles/openshift_master_certificates/meta/main.yml
index fd7b73b0f..dd19c8ded 100644
--- a/roles/openshift_master_certificates/meta/main.yml
+++ b/roles/openshift_master_certificates/meta/main.yml
@@ -1,10 +1,10 @@
---
galaxy_info:
author: Jason DeTiberus
- description:
+ description: OpenShift Master Certificates
company: Red Hat, Inc.
license: Apache License, Version 2.0
- min_ansible_version: 1.8
+ min_ansible_version: 2.1
platforms:
- name: EL
versions:
@@ -13,4 +13,4 @@ galaxy_info:
- cloud
- system
dependencies:
-- { role: openshift_master_ca }
+- role: openshift_ca
diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml
index 394f9d381..6fb5830cf 100644
--- a/roles/openshift_master_certificates/tasks/main.yml
+++ b/roles/openshift_master_certificates/tasks/main.yml
@@ -1,38 +1,123 @@
---
+- set_fact:
+ openshift_master_certs_no_etcd:
+ - admin.crt
+ - master.kubelet-client.crt
+ - "{{ 'master.proxy-client.crt' if openshift.common.version_gte_3_1_or_1_1 else omit }}"
+ - master.server.crt
+ - openshift-master.crt
+ - openshift-registry.crt
+ - openshift-router.crt
+ - etcd.server.crt
+ openshift_master_certs_etcd:
+ - master.etcd-client.crt
+
+- set_fact:
+ openshift_master_certs: "{{ (openshift_master_certs_no_etcd | union(openshift_master_certs_etcd )) if openshift_master_etcd_hosts | length > 0 else openshift_master_certs_no_etcd }}"
+
+- name: Check status of master certificates
+ stat:
+ path: "{{ openshift_master_config_dir }}/{{ item }}"
+ with_items:
+ - "{{ openshift_master_certs }}"
+ register: g_master_cert_stat_result
+
+- set_fact:
+ master_certs_missing: "{{ False in (g_master_cert_stat_result.results
+ | oo_collect(attribute='stat.exists')
+ | list) }}"
+
 - name: Ensure the generated_configs directory is present
file:
- path: "{{ openshift_generated_configs_dir }}/{{ item.master_cert_subdir }}"
+ path: "{{ openshift_master_generated_config_dir }}"
state: directory
mode: 0700
- with_items: "{{ masters_needing_certs | default([]) }}"
+ when: master_certs_missing | bool
+ delegate_to: "{{ openshift_ca_host }}"
- file:
- src: "{{ openshift_master_config_dir }}/{{ item.1 }}"
- dest: "{{ openshift_generated_configs_dir }}/{{ item.0.master_cert_subdir }}/{{ item.1 }}"
+ src: "{{ openshift_master_config_dir }}/{{ item }}"
+ dest: "{{ openshift_master_generated_config_dir }}/{{ item }}"
state: hard
- with_nested:
- - "{{ masters_needing_certs | default([]) }}"
- -
- - ca.crt
- - ca.key
- - ca.serial.txt
+ with_items:
+ - ca.crt
+ - ca.key
+ - ca.serial.txt
+ when: master_certs_missing | bool
+ delegate_to: "{{ openshift_ca_host }}"
- name: Create the master certificates if they do not already exist
command: >
{{ openshift.common.admin_binary }} create-master-certs
- --hostnames={{ item.openshift.common.all_hostnames | join(',') }}
- --master={{ item.openshift.master.api_url }}
- --public-master={{ item.openshift.master.public_api_url }}
- --cert-dir={{ openshift_generated_configs_dir }}/{{ item.master_cert_subdir }}
+ --hostnames={{ openshift.common.all_hostnames | join(',') }}
+ --master={{ openshift.master.api_url }}
+ --public-master={{ openshift.master.public_api_url }}
+ --cert-dir={{ openshift_master_generated_config_dir }}
--overwrite=false
- when: item.master_certs_missing | bool
- with_items: "{{ masters_needing_certs | default([]) }}"
+ when: master_certs_missing | bool
+ delegate_to: "{{ openshift_ca_host }}"
- file:
- src: "{{ openshift_master_config_dir }}/{{ item.1 }}"
- dest: "{{ openshift_generated_configs_dir }}/{{ item.0.master_cert_subdir }}/{{ item.1 }}"
+ src: "{{ openshift_master_config_dir }}/{{ item }}"
+ dest: "{{ openshift_master_generated_config_dir }}/{{ item }}"
state: hard
force: true
- with_nested:
- - "{{ masters_needing_certs | default([]) }}"
+ with_items:
- "{{ hostvars[inventory_hostname] | certificates_to_synchronize }}"
+ when: master_certs_missing | bool
+ delegate_to: "{{ openshift_ca_host }}"
+
+- name: Remove generated etcd client certs when using external etcd
+ file:
+ path: "{{ openshift_master_generated_config_dir }}/{{ item }}"
+ state: absent
+ when: openshift_master_etcd_hosts | length > 0
+ with_items:
+ - master.etcd-client.crt
+ - master.etcd-client.key
+ delegate_to: "{{ openshift_ca_host }}"
+
+- name: Create local temp directory for syncing certs
+ local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
+ register: g_master_mktemp
+ changed_when: False
+ when: master_certs_missing | bool
+ delegate_to: localhost
+ become: no
+
+- name: Create a tarball of the master certs
+ command: >
+ tar -czvf {{ openshift_master_generated_config_dir }}.tgz
+ -C {{ openshift_master_generated_config_dir }} .
+ args:
+ creates: "{{ openshift_master_generated_config_dir }}.tgz"
+ when: master_certs_missing | bool and inventory_hostname != openshift_ca_host
+ delegate_to: "{{ openshift_ca_host }}"
+
+- name: Retrieve the master cert tarball from the master
+ fetch:
+ src: "{{ openshift_master_generated_config_dir }}.tgz"
+ dest: "{{ g_master_mktemp.stdout }}/"
+ flat: yes
+ fail_on_missing: yes
+ validate_checksum: yes
+ when: master_certs_missing | bool and inventory_hostname != openshift_ca_host
+ delegate_to: "{{ openshift_ca_host }}"
+
+- name: Ensure certificate directory exists
+ file:
+ path: "{{ openshift_master_config_dir }}"
+ state: directory
+ when: master_certs_missing | bool and inventory_hostname != openshift_ca_host
+
+- name: Unarchive the tarball on the master
+ unarchive:
+ src: "{{ g_master_mktemp.stdout }}/{{ openshift_master_cert_subdir }}.tgz"
+ dest: "{{ openshift_master_config_dir }}"
+ when: master_certs_missing | bool and inventory_hostname != openshift_ca_host
+
+- file: name={{ g_master_mktemp.stdout }} state=absent
+ changed_when: False
+ when: master_certs_missing | bool
+ delegate_to: localhost
+ become: no
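Taken together, the new tasks amount to a generate-on-CA, fetch, unpack flow. A condensed sketch of the three delegation steps, using a hypothetical `ca_host` variable and placeholder paths:

```
---
# Condensed sketch of the certificate distribution flow above:
# generate on the CA host, fetch to the control machine, unpack on
# the target. `ca_host` and all paths are placeholders.
- name: Archive the generated config on the CA host
  command: tar -czf /tmp/certs.tgz -C /etc/origin/generated-configs/example .
  args:
    creates: /tmp/certs.tgz
  delegate_to: "{{ ca_host }}"

- name: Fetch the archive to the control machine
  fetch:
    src: /tmp/certs.tgz
    dest: /tmp/local-staging/
    flat: yes

- name: Unpack the archive on the target host
  unarchive:
    src: /tmp/local-staging/certs.tgz
    dest: /etc/origin/master
```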
diff --git a/roles/openshift_master_certificates/vars/main.yml b/roles/openshift_master_certificates/vars/main.yml
index 3f18ddc79..66f2e5162 100644
--- a/roles/openshift_master_certificates/vars/main.yml
+++ b/roles/openshift_master_certificates/vars/main.yml
@@ -1,3 +1,5 @@
---
openshift_generated_configs_dir: "{{ openshift.common.config_base }}/generated-configs"
+openshift_master_cert_subdir: "master-{{ openshift.common.hostname }}"
openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
+openshift_master_generated_config_dir: "{{ openshift_generated_configs_dir }}/{{ openshift_master_cert_subdir }}"
diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml
index 97ab8241b..fd493340b 100644
--- a/roles/openshift_node/meta/main.yml
+++ b/roles/openshift_node/meta/main.yml
@@ -4,7 +4,7 @@ galaxy_info:
description: OpenShift Node
company: Red Hat, Inc.
license: Apache License, Version 2.0
- min_ansible_version: 1.7
+ min_ansible_version: 2.1
platforms:
- name: EL
versions:
@@ -14,6 +14,7 @@ galaxy_info:
dependencies:
- role: openshift_clock
- role: openshift_docker
+- role: openshift_node_certificates
- role: openshift_cloud_provider
- role: openshift_common
- role: openshift_node_dnsmasq
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 97a21544d..889541e25 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -41,11 +41,15 @@
- name: Pull node image
command: >
docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }}
+ register: pull_result
+ changed_when: "'Downloaded newer image' in pull_result.stdout"
when: openshift.common.is_containerized | bool
- name: Pull OpenVSwitch image
command: >
docker pull {{ openshift.node.ovs_image }}:{{ openshift_image_tag }}
+ register: pull_result
+ changed_when: "'Downloaded newer image' in pull_result.stdout"
when: openshift.common.is_containerized | bool and openshift.common.use_openshift_sdn | bool
- name: Install the systemd units
@@ -129,12 +133,12 @@
service: name={{ openshift.common.service_type }}-node enabled=yes state=started
register: node_start_result
ignore_errors: yes
-
+
- name: Wait 30 seconds for docker initialization whenever node has failed
pause:
seconds: 30
when: node_start_result | failed
-
+
- name: Start and enable node again
service: name={{ openshift.common.service_type }}-node enabled=yes state=started
register: node_start_result
diff --git a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
index 8fc8497fa..4fd9cd10b 100644
--- a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
+++ b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml
@@ -3,14 +3,30 @@
action: "{{ ansible_pkg_mgr }} name=glusterfs-fuse state=present"
when: not openshift.common.is_atomic | bool
-- name: Set sebooleans to allow gluster storage plugin access from containers
+- name: Check for existence of virt_use_fusefs seboolean
+ command: getsebool virt_use_fusefs
+ register: virt_use_fusefs_output
+ when: ansible_selinux and ansible_selinux.status == "enabled"
+ failed_when: false
+ changed_when: false
+
+- name: Set seboolean to allow gluster storage plugin access from containers
seboolean:
- name: "{{ item }}"
+ name: virt_use_fusefs
state: yes
persistent: yes
+ when: ansible_selinux and ansible_selinux.status == "enabled" and virt_use_fusefs_output.rc == 0
+
+- name: Check for existence of virt_sandbox_use_fusefs seboolean
+ command: getsebool virt_sandbox_use_fusefs
+ register: virt_sandbox_use_fusefs_output
when: ansible_selinux and ansible_selinux.status == "enabled"
- with_items:
- - virt_use_fusefs
- - virt_sandbox_use_fusefs
- register: sebool_result
- failed_when: "'state' not in sebool_result and 'msg' in sebool_result and 'SELinux boolean {{ item }} does not exist' not in sebool_result.msg"
+ failed_when: false
+ changed_when: false
+
+- name: Set seboolean to allow gluster storage plugin access from containers(sandbox)
+ seboolean:
+ name: virt_sandbox_use_fusefs
+ state: yes
+ persistent: yes
+ when: ansible_selinux and ansible_selinux.status == "enabled" and virt_sandbox_use_fusefs_output.rc == 0
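These tasks probe each SELinux boolean with `getsebool` before setting it, so hosts whose policy lacks the boolean skip the step instead of failing. The pattern in isolation, with `virt_use_fusefs` standing in for any boolean:

```
---
# Probe-then-set pattern for SELinux booleans; `virt_use_fusefs` here
# stands in for any boolean that may be absent from the host policy.
- name: Check whether the boolean exists in the loaded policy
  command: getsebool virt_use_fusefs
  register: bool_probe
  when: ansible_selinux and ansible_selinux.status == "enabled"
  failed_when: false
  changed_when: false

- name: Enable the boolean only when the probe succeeded
  seboolean:
    name: virt_use_fusefs
    state: yes
    persistent: yes
  when: ansible_selinux and ansible_selinux.status == "enabled" and bool_probe.rc == 0
```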
diff --git a/roles/openshift_node/tasks/storage_plugins/nfs.yml b/roles/openshift_node/tasks/storage_plugins/nfs.yml
index 8380714d4..e384c1bd7 100644
--- a/roles/openshift_node/tasks/storage_plugins/nfs.yml
+++ b/roles/openshift_node/tasks/storage_plugins/nfs.yml
@@ -3,16 +3,30 @@
action: "{{ ansible_pkg_mgr }} name=nfs-utils state=present"
when: not openshift.common.is_atomic | bool
+- name: Check for existence of virt_use_nfs seboolean
+ command: getsebool virt_use_nfs
+ register: virt_use_nfs_output
+ when: ansible_selinux and ansible_selinux.status == "enabled"
+ failed_when: false
+ changed_when: false
+
- name: Set seboolean to allow nfs storage plugin access from containers
seboolean:
name: virt_use_nfs
state: yes
persistent: yes
+ when: ansible_selinux and ansible_selinux.status == "enabled" and virt_use_nfs_output.rc == 0
+
+- name: Check for existence of virt_sandbox_use_nfs seboolean
+ command: getsebool virt_sandbox_use_nfs
+ register: virt_sandbox_use_nfs_output
when: ansible_selinux and ansible_selinux.status == "enabled"
+ failed_when: false
+ changed_when: false
- name: Set seboolean to allow nfs storage plugin access from containers(sandbox)
seboolean:
name: virt_sandbox_use_nfs
state: yes
persistent: yes
- when: ansible_selinux and ansible_selinux.status == "enabled"
+ when: ansible_selinux and ansible_selinux.status == "enabled" and virt_sandbox_use_nfs_output.rc == 0
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 9ba1a01dd..a37770c4a 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -34,7 +34,6 @@ servingInfo:
clientCA: ca.crt
keyFile: server.key
volumeDirectory: {{ openshift.common.data_dir }}/openshift.local.volumes
-{% include 'partials/kubeletArguments.j2' %}
proxyArguments:
proxy-mode:
- {{ openshift.node.proxy_mode }}
diff --git a/roles/openshift_node/templates/partials/kubeletArguments.j2 b/roles/openshift_node/templates/partials/kubeletArguments.j2
deleted file mode 100644
index 6c3bd04c5..000000000
--- a/roles/openshift_node/templates/partials/kubeletArguments.j2
+++ /dev/null
@@ -1,5 +0,0 @@
-{% if openshift.common.use_cluster_metrics | bool %}
-kubeletArguments:
- "read-only-port":
- - "10255"
-{% endif %} \ No newline at end of file
diff --git a/roles/openshift_node_certificates/README.md b/roles/openshift_node_certificates/README.md
index 6264d253a..f56066b29 100644
--- a/roles/openshift_node_certificates/README.md
+++ b/roles/openshift_node_certificates/README.md
@@ -1,27 +1,44 @@
-OpenShift/Atomic Enterprise Node Certificates
-=============================================
+OpenShift Node Certificates
+===========================
-TODO
+This role determines whether OpenShift node certificates must be created, delegates certificate creation to the `openshift_ca_host`, and then deploys those certificates to the node hosts to which this role is applied.
Requirements
------------
-TODO
-
Role Variables
--------------
-TODO
+From `openshift_ca`:
+
+| Name | Default value | Description |
+|-------------------------------------|-------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------|
+| openshift_ca_host | None (Required) | The hostname of the system where the OpenShift CA will be (or has been) created. |
+
+From this role:
+
+| Name | Default value | Description |
+|-------------------------------------|-------------------------------------------------------------------------|---------------------------------------------------------------------------------------------------------------------------|
+| openshift_generated_configs_dir | `{{ openshift.common.config_base }}/generated-configs` | Directory in which per-node generated config directories will be created on the `openshift_ca_host`. |
+| openshift_node_cert_subdir | `node-{{ openshift.common.hostname }}` | Directory within `openshift_generated_configs_dir` where per-node certificates will be placed on the `openshift_ca_host`. |
+| openshift_node_config_dir | `{{ openshift.common.config_base }}/node` | Node configuration directory in which certificates will be deployed on nodes. |
+| openshift_node_generated_config_dir | `{{ openshift_generated_configs_dir }}/{{ openshift_node_cert_subdir }}` | Full path to the per-node generated config directory. |
Dependencies
------------
-TODO
+* openshift_ca
Example Playbook
----------------
-TODO
+```
+- name: Create OpenShift Node Certificates
+ hosts: nodes
+ roles:
+ - role: openshift_node_certificates
+ openshift_ca_host: master1.example.com
+```
License
-------
diff --git a/roles/openshift_node_certificates/meta/main.yml b/roles/openshift_node_certificates/meta/main.yml
index f3236e850..50a862ee9 100644
--- a/roles/openshift_node_certificates/meta/main.yml
+++ b/roles/openshift_node_certificates/meta/main.yml
@@ -1,10 +1,10 @@
---
galaxy_info:
author: Jason DeTiberus
- description:
+ description: OpenShift Node Certificates
company: Red Hat, Inc.
license: Apache License, Version 2.0
- min_ansible_version: 1.8
+ min_ansible_version: 2.1
platforms:
- name: EL
versions:
@@ -13,4 +13,4 @@ galaxy_info:
- cloud
- system
dependencies:
-- { role: openshift_facts }
+- role: openshift_facts
diff --git a/roles/openshift_node_certificates/tasks/main.yml b/roles/openshift_node_certificates/tasks/main.yml
index 216c11093..0e69dc6f0 100644
--- a/roles/openshift_node_certificates/tasks/main.yml
+++ b/roles/openshift_node_certificates/tasks/main.yml
@@ -1,36 +1,117 @@
---
-- name: Create openshift_generated_configs_dir if it doesn\'t exist
+- name: Ensure CA certificate exists on openshift_ca_host
+ stat:
+ path: "{{ openshift_ca_cert }}"
+ register: g_ca_cert_stat_result
+ delegate_to: "{{ openshift_ca_host }}"
+ run_once: true
+
+- fail:
+ msg: >
+ CA certificate {{ openshift_ca_cert }} doesn't exist on CA host
+ {{ openshift_ca_host }}. Apply 'openshift_ca' role to
+ {{ openshift_ca_host }}.
+ when: not g_ca_cert_stat_result.stat.exists | bool
+ run_once: true
+
+- name: Check status of node certificates
+ stat:
+ path: "{{ openshift.common.config_base }}/node/{{ item }}"
+ with_items:
+ - "system:node:{{ openshift.common.hostname }}.crt"
+ - "system:node:{{ openshift.common.hostname }}.key"
+ - "system:node:{{ openshift.common.hostname }}.kubeconfig"
+ - ca.crt
+ - server.key
+ - server.crt
+ register: g_node_cert_stat_result
+
+- set_fact:
+ node_certs_missing: "{{ False in (g_node_cert_stat_result.results
+ | oo_collect(attribute='stat.exists')
+ | list) }}"
+
+- name: Create openshift_generated_configs_dir if it does not exist
file:
path: "{{ openshift_generated_configs_dir }}"
state: directory
mode: 0700
- when: nodes_needing_certs | length > 0
+ when: node_certs_missing | bool
+ delegate_to: "{{ openshift_ca_host }}"
- name: Generate the node client config
command: >
{{ openshift.common.admin_binary }} create-api-client-config
- --certificate-authority={{ openshift_master_ca_cert }}
- --client-dir={{ openshift_generated_configs_dir }}/node-{{ item.openshift.common.hostname }}
+ --certificate-authority={{ openshift_ca_cert }}
+ --client-dir={{ openshift_node_generated_config_dir }}
--groups=system:nodes
- --master={{ openshift.master.api_url }}
- --signer-cert={{ openshift_master_ca_cert }}
- --signer-key={{ openshift_master_ca_key }}
- --signer-serial={{ openshift_master_ca_serial }}
- --user=system:node:{{ item.openshift.common.hostname }}
+ --master={{ hostvars[openshift_ca_host].openshift.master.api_url }}
+ --signer-cert={{ openshift_ca_cert }}
+ --signer-key={{ openshift_ca_key }}
+ --signer-serial={{ openshift_ca_serial }}
+ --user=system:node:{{ openshift.common.hostname }}
args:
- creates: "{{ openshift_generated_configs_dir }}/node-{{ item.openshift.common.hostname }}"
- with_items: "{{ nodes_needing_certs | default([]) }}"
+ creates: "{{ openshift_node_generated_config_dir }}"
+ when: node_certs_missing | bool
+ delegate_to: "{{ openshift_ca_host }}"
- name: Generate the node server certificate
command: >
{{ openshift.common.admin_binary }} ca create-server-cert
- --cert={{ openshift_generated_configs_dir }}/node-{{ item.openshift.common.hostname }}/server.crt
- --key={{ openshift_generated_configs_dir }}/node-{{ item.openshift.common.hostname }}/server.key
+ --cert={{ openshift_node_generated_config_dir }}/server.crt
+    --key={{ openshift_node_generated_config_dir }}/server.key
--overwrite=true
- --hostnames={{ item.openshift.common.all_hostnames |join(",") }}
- --signer-cert={{ openshift_master_ca_cert }}
- --signer-key={{ openshift_master_ca_key }}
- --signer-serial={{ openshift_master_ca_serial }}
+ --hostnames={{ openshift.common.all_hostnames |join(",") }}
+ --signer-cert={{ openshift_ca_cert }}
+ --signer-key={{ openshift_ca_key }}
+ --signer-serial={{ openshift_ca_serial }}
+ args:
+ creates: "{{ openshift_node_generated_config_dir }}/server.crt"
+ when: node_certs_missing | bool
+ delegate_to: "{{ openshift_ca_host}}"
+
+- name: Create local temp directory for syncing certs
+ local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX
+ register: node_cert_mktemp
+ changed_when: False
+ when: node_certs_missing | bool
+ delegate_to: localhost
+ become: no
+
+- name: Create a tarball of the node config directories
+ command: >
+ tar -czvf {{ openshift_node_generated_config_dir }}.tgz
+ --transform 's|system:{{ openshift_node_cert_subdir }}|node|'
+ -C {{ openshift_node_generated_config_dir }} .
args:
- creates: "{{ openshift_generated_configs_dir }}/node-{{ item.openshift.common.hostname }}/server.crt"
- with_items: "{{ nodes_needing_certs | default([]) }}"
+ creates: "{{ openshift_node_generated_config_dir }}.tgz"
+ when: node_certs_missing | bool
+ delegate_to: "{{ openshift_ca_host }}"
+
+- name: Retrieve the node config tarballs from the master
+ fetch:
+ src: "{{ openshift_node_generated_config_dir }}.tgz"
+ dest: "{{ node_cert_mktemp.stdout }}/"
+ flat: yes
+ fail_on_missing: yes
+ validate_checksum: yes
+ when: node_certs_missing | bool
+ delegate_to: "{{ openshift_ca_host }}"
+
+- name: Ensure certificate directory exists
+ file:
+ path: "{{ openshift_node_cert_dir }}"
+ state: directory
+ when: node_certs_missing | bool
+
+- name: Unarchive the tarball on the node
+ unarchive:
+ src: "{{ node_cert_mktemp.stdout }}/{{ openshift_node_cert_subdir }}.tgz"
+ dest: "{{ openshift_node_cert_dir }}"
+ when: node_certs_missing | bool
+
+- file: name={{ node_cert_mktemp.stdout }} state=absent
+ changed_when: False
+ when: node_certs_missing | bool
+ delegate_to: localhost
+ become: no
diff --git a/roles/openshift_node_certificates/vars/main.yml b/roles/openshift_node_certificates/vars/main.yml
index 61fbb1e51..17ad8106d 100644
--- a/roles/openshift_node_certificates/vars/main.yml
+++ b/roles/openshift_node_certificates/vars/main.yml
@@ -1,7 +1,11 @@
---
-openshift_node_config_dir: "{{ openshift.common.config_base }}/node"
-openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
openshift_generated_configs_dir: "{{ openshift.common.config_base }}/generated-configs"
-openshift_master_ca_cert: "{{ openshift_master_config_dir }}/ca.crt"
-openshift_master_ca_key: "{{ openshift_master_config_dir }}/ca.key"
-openshift_master_ca_serial: "{{ openshift_master_config_dir }}/ca.serial.txt"
+openshift_node_cert_dir: "{{ openshift.common.config_base }}/node"
+openshift_node_cert_subdir: "node-{{ openshift.common.hostname }}"
+openshift_node_config_dir: "{{ openshift.common.config_base }}/node"
+openshift_node_generated_config_dir: "{{ openshift_generated_configs_dir }}/{{ openshift_node_cert_subdir }}"
+
+openshift_ca_config_dir: "{{ openshift.common.config_base }}/master"
+openshift_ca_cert: "{{ openshift_ca_config_dir }}/ca.crt"
+openshift_ca_key: "{{ openshift_ca_config_dir }}/ca.key"
+openshift_ca_serial: "{{ openshift_ca_config_dir }}/ca.serial.txt"
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index 0a134f557..6e5d2b22c 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -76,5 +76,12 @@
# We can't map an openshift_release to full rpm version like we can with containers, make sure
# the rpm version we looked up matches the release requested and error out if not.
- fail:
- msg: "Detected openshift version {{ openshift_version }} does not match requested openshift_release {{ openshift_release }}. You may need to adjust your yum repositories or specify an exact openshift_pkg_version."
+ msg: "Detected OpenShift version {{ openshift_version }} does not match requested openshift_release {{ openshift_release }}. You may need to adjust your yum repositories, inventory, or run the appropriate OpenShift upgrade playbook."
when: not is_containerized | bool and openshift_release is defined and not openshift_version.startswith(openshift_release) | bool
+
+# The end result of these three variables is quite important, so make sure they are displayed and logged:
+- debug: var=openshift_release
+
+- debug: var=openshift_image_tag
+
+- debug: var=openshift_pkg_version
diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py
index d6f2540bc..c9c13501d 100644
--- a/utils/src/ooinstall/cli_installer.py
+++ b/utils/src/ooinstall/cli_installer.py
@@ -10,7 +10,7 @@ import click
from ooinstall import openshift_ansible
from ooinstall import OOConfig
from ooinstall.oo_config import OOConfigInvalidHostError
-from ooinstall.oo_config import Host
+from ooinstall.oo_config import Host, Role
from ooinstall.variants import find_variant, get_variant_version_combos
DEFAULT_ANSIBLE_CONFIG = '/usr/share/atomic-openshift-utils/ansible.cfg'
@@ -121,21 +121,23 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen
click.echo(message)
hosts = []
+ roles = set(['master', 'node', 'storage'])
more_hosts = True
num_masters = 0
while more_hosts:
host_props = {}
+ host_props['roles'] = []
host_props['connect_to'] = click.prompt('Enter hostname or IP address',
value_proc=validate_prompt_hostname)
if not masters_set:
if click.confirm('Will this host be an OpenShift Master?'):
- host_props['master'] = True
+ host_props['roles'].append('master')
num_masters += 1
if oo_cfg.settings['variant_version'] == '3.0':
masters_set = True
- host_props['node'] = True
+ host_props['roles'].append('node')
host_props['containerized'] = False
if oo_cfg.settings['variant_version'] != '3.0':
@@ -152,6 +154,7 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen
hosts.append(host)
+
if print_summary:
print_installation_summary(hosts, oo_cfg.settings['variant_version'])
@@ -163,11 +166,12 @@ http://docs.openshift.com/enterprise/latest/architecture/infrastructure_componen
if num_masters >= 3:
collect_master_lb(hosts)
+ roles.add('master_lb')
if not existing_env:
collect_storage_host(hosts)
- return hosts
+ return hosts, roles
def print_installation_summary(hosts, version=None):
@@ -184,9 +188,9 @@ def print_installation_summary(hosts, version=None):
for host in hosts:
print_host_summary(hosts, host)
- masters = [host for host in hosts if host.master]
- nodes = [host for host in hosts if host.node]
- dedicated_nodes = [host for host in hosts if host.node and not host.master]
+ masters = [host for host in hosts if host.is_master()]
+ nodes = [host for host in hosts if host.is_node()]
+ dedicated_nodes = [host for host in hosts if host.is_node() and not host.is_master()]
click.echo('')
click.echo('Total OpenShift Masters: %s' % len(masters))
click.echo('Total OpenShift Nodes: %s' % len(nodes))
@@ -226,26 +230,26 @@ deployment."""
def print_host_summary(all_hosts, host):
click.echo("- %s" % host.connect_to)
- if host.master:
+ if host.is_master():
click.echo(" - OpenShift Master")
- if host.node:
+ if host.is_node():
if host.is_dedicated_node():
click.echo(" - OpenShift Node (Dedicated)")
elif host.is_schedulable_node(all_hosts):
click.echo(" - OpenShift Node")
else:
click.echo(" - OpenShift Node (Unscheduled)")
- if host.master_lb:
+ if host.is_master_lb():
if host.preconfigured:
click.echo(" - Load Balancer (Preconfigured)")
else:
click.echo(" - Load Balancer (HAProxy)")
- if host.master:
+ if host.is_master():
if host.is_etcd_member(all_hosts):
click.echo(" - Etcd Member")
else:
click.echo(" - Etcd (Embedded)")
- if host.storage:
+ if host.is_storage():
click.echo(" - Storage")
@@ -279,7 +283,7 @@ hostname.
# Make sure this host wasn't already specified:
for host in hosts:
- if host.connect_to == hostname and (host.master or host.node):
+ if host.connect_to == hostname and (host.is_master() or host.is_node()):
raise click.BadParameter('Cannot re-use "%s" as a load balancer, '
'please specify a separate host' % hostname)
return hostname
@@ -289,9 +293,7 @@ hostname.
install_haproxy = \
click.confirm('Should the reference haproxy load balancer be installed on this host?')
host_props['preconfigured'] = not install_haproxy
- host_props['master'] = False
- host_props['node'] = False
- host_props['master_lb'] = True
+ host_props['roles'] = ['master_lb']
master_lb = Host(**host_props)
hosts.append(master_lb)
@@ -309,14 +311,14 @@ Note: Containerized storage hosts are not currently supported.
click.echo(message)
host_props = {}
- first_master = next(host for host in hosts if host.master)
+ first_master = next(host for host in hosts if host.is_master())
hostname_or_ip = click.prompt('Enter hostname or IP address',
value_proc=validate_prompt_hostname,
default=first_master.connect_to)
existing, existing_host = is_host_already_node_or_master(hostname_or_ip, hosts)
- if existing and existing_host.node:
- existing_host.storage = True
+ if existing and existing_host.is_node():
+ existing_host.roles.append('storage')
else:
host_props['connect_to'] = hostname_or_ip
host_props['preconfigured'] = False
@@ -331,14 +333,14 @@ def is_host_already_node_or_master(hostname, hosts):
existing_host = None
for host in hosts:
- if host.connect_to == hostname and (host.master or host.node):
+ if host.connect_to == hostname and (host.is_master() or host.is_node()):
is_existing = True
existing_host = host
return is_existing, existing_host
def confirm_hosts_facts(oo_cfg, callback_facts):
- hosts = oo_cfg.hosts
+ hosts = oo_cfg.deployment.hosts
click.clear()
message = """
A list of the facts gathered from the provided hosts follows. Because it is
@@ -414,19 +416,20 @@ Edit %s with the desired values and run `atomic-openshift-installer --unattended
def check_hosts_config(oo_cfg, unattended):
click.clear()
- masters = [host for host in oo_cfg.hosts if host.master]
+ masters = [host for host in oo_cfg.deployment.hosts if host.is_master()]
if len(masters) == 2:
click.echo("A minimum of 3 Masters are required for HA deployments.")
sys.exit(1)
if len(masters) > 1:
- master_lb = [host for host in oo_cfg.hosts if host.master_lb]
+ master_lb = [host for host in oo_cfg.deployment.hosts if host.is_master_lb()]
+
if len(master_lb) > 1:
click.echo('ERROR: More than one Master load balancer specified. Only one is allowed.')
sys.exit(1)
elif len(master_lb) == 1:
- if master_lb[0].master or master_lb[0].node:
+ if master_lb[0].is_master() or master_lb[0].is_node():
click.echo('ERROR: The Master load balancer is configured as a master or node. ' \
'Please correct this.')
sys.exit(1)
@@ -440,7 +443,8 @@ https://docs.openshift.org/latest/install_config/install/advanced_install.html#m
click.echo(message)
sys.exit(1)
- dedicated_nodes = [host for host in oo_cfg.hosts if host.node and not host.master]
+ dedicated_nodes = [host for host in oo_cfg.deployment.hosts \
+ if host.is_node() and not host.is_master()]
if len(dedicated_nodes) == 0:
message = """
WARNING: No dedicated Nodes specified. By default, colocated Masters have
@@ -481,7 +485,7 @@ def confirm_continue(message):
def error_if_missing_info(oo_cfg):
missing_info = False
- if not oo_cfg.hosts:
+ if not oo_cfg.deployment.hosts:
missing_info = True
click.echo('For unattended installs, hosts must be specified on the '
'command line or in the config file: %s' % oo_cfg.config_path)
@@ -508,16 +512,24 @@ def error_if_missing_info(oo_cfg):
sys.exit(1)
oo_cfg.settings['variant_version'] = version.name
- missing_facts = oo_cfg.calc_missing_facts()
- if len(missing_facts) > 0:
+    # Check that every role assigned to a host is also defined for the deployment:
+ listed_roles = get_host_roles_set(oo_cfg)
+    configured_roles = set(oo_cfg.deployment.roles)
+ if listed_roles != configured_roles:
missing_info = True
- click.echo('For unattended installs, facts must be provided for all masters/nodes:')
- for host in missing_facts:
- click.echo('Host "%s" missing facts: %s' % (host, ", ".join(missing_facts[host])))
+        click.echo('Every role assigned to a host must also be defined for the deployment.')
if missing_info:
sys.exit(1)
+def get_host_roles_set(oo_cfg):
+ roles_set = set()
+ for host in oo_cfg.deployment.hosts:
+ for role in host.roles:
+ roles_set.add(role)
+
+ return roles_set
+
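For illustration, a minimal sketch of the consistency check performed above; HostStub and roles_are_consistent are hypothetical names, not part of the installer:

    # Sketch: every role referenced by a host must match the defined roles exactly.
    class HostStub(object):
        def __init__(self, roles):
            self.roles = roles

    def roles_are_consistent(hosts, defined_roles):
        listed = set()
        for host in hosts:
            listed.update(host.roles)
        # The installer errors out on any mismatch, in either direction.
        return listed == set(defined_roles)

    hosts = [HostStub(['master', 'node']), HostStub(['node'])]
    print(roles_are_consistent(hosts, {'master': {}, 'node': {}}))  # True
    print(roles_are_consistent(hosts, {'master': {}}))              # False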
def get_proxy_hostnames_and_excludes():
message = """
If a proxy is needed to reach HTTP and HTTPS traffic please enter the name below.
@@ -574,35 +586,51 @@ https://docs.openshift.com/enterprise/latest/admin_guide/install/prerequisites.h
confirm_continue(message)
click.clear()
- if oo_cfg.settings.get('ansible_ssh_user', '') == '':
+ if not oo_cfg.settings.get('ansible_ssh_user', ''):
oo_cfg.settings['ansible_ssh_user'] = get_ansible_ssh_user()
click.clear()
- if oo_cfg.settings.get('variant', '') == '':
+ if not oo_cfg.settings.get('variant', ''):
variant, version = get_variant_and_version()
oo_cfg.settings['variant'] = variant.name
oo_cfg.settings['variant_version'] = version.name
click.clear()
- if not oo_cfg.hosts:
- oo_cfg.hosts = collect_hosts(oo_cfg)
+ if not oo_cfg.deployment.hosts:
+ oo_cfg.deployment.hosts, roles = collect_hosts(oo_cfg)
+
+ for role in roles:
+ oo_cfg.deployment.roles[role] = Role(name=role, variables={})
click.clear()
- if not oo_cfg.settings.get('master_routingconfig_subdomain', None):
- oo_cfg.settings['master_routingconfig_subdomain'] = get_master_routingconfig_subdomain()
+    if 'master_routingconfig_subdomain' not in oo_cfg.deployment.variables:
+ oo_cfg.deployment.variables['master_routingconfig_subdomain'] = \
+ get_master_routingconfig_subdomain()
click.clear()
if not oo_cfg.settings.get('openshift_http_proxy', None) and \
LooseVersion(oo_cfg.settings.get('variant_version', '0.0')) >= LooseVersion('3.2'):
http_proxy, https_proxy, proxy_excludes = get_proxy_hostnames_and_excludes()
- oo_cfg.settings['openshift_http_proxy'] = http_proxy
- oo_cfg.settings['openshift_https_proxy'] = https_proxy
- oo_cfg.settings['openshift_no_proxy'] = proxy_excludes
+ oo_cfg.deployment.variables['proxy_http'] = http_proxy
+ oo_cfg.deployment.variables['proxy_https'] = https_proxy
+ oo_cfg.deployment.variables['proxy_exclude_hosts'] = proxy_excludes
click.clear()
return oo_cfg
+def get_role_variable(oo_cfg, role_name, variable_name):
+ try:
+        target_role = oo_cfg.deployment.roles[role_name]
+        target_variable = target_role.variables[variable_name]
+        return target_variable
+    except KeyError:
+        return None
+
+def set_role_variable(oo_cfg, role_name, variable_name, variable_value):
+    target_role = oo_cfg.deployment.roles[role_name]
+    target_role.variables[variable_name] = variable_value
+
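A short usage sketch for the two helpers above, assuming the patched modules are importable; CfgStub is a hypothetical stand-in for OOConfig:

    from ooinstall.cli_installer import get_role_variable, set_role_variable
    from ooinstall.oo_config import Deployment, Role

    class CfgStub(object):
        # Only .deployment is needed by the role-variable helpers.
        def __init__(self, deployment):
            self.deployment = deployment

    oo_cfg = CfgStub(Deployment(hosts=[], roles={'node': Role('node', {})}, variables={}))
    set_role_variable(oo_cfg, 'node', 'some_node_variable', 'some_value')
    print(get_role_variable(oo_cfg, 'node', 'some_node_variable'))  # some_value
    print(get_role_variable(oo_cfg, 'node', 'missing_variable'))    # None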
def collect_new_nodes(oo_cfg):
click.clear()
click.echo('*** New Node Configuration ***')
@@ -610,14 +638,15 @@ def collect_new_nodes(oo_cfg):
Add new nodes here
"""
click.echo(message)
- return collect_hosts(oo_cfg, existing_env=True, masters_set=True, print_summary=False)
+ new_nodes, _ = collect_hosts(oo_cfg, existing_env=True, masters_set=True, print_summary=False)
+ return new_nodes
def get_installed_hosts(hosts, callback_facts):
installed_hosts = []
# count nativeha lb as an installed host
try:
- first_master = next(host for host in hosts if host.master)
+ first_master = next(host for host in hosts if host.is_master())
lb_hostname = callback_facts[first_master.connect_to]['master'].get('cluster_hostname', '')
lb_host = \
next(host for host in hosts if host.ip == callback_facts[lb_hostname]['common']['ip'])
@@ -636,17 +665,17 @@ def is_installed_host(host, callback_facts):
callback_facts[host.connect_to]['common'].get('version', '') and \
callback_facts[host.connect_to]['common'].get('version', '') != 'None'
- return version_found or host.master_lb or host.preconfigured
+ return version_found
# pylint: disable=too-many-branches
# This pylint error will be corrected shortly in separate PR.
def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose):
# Copy the list of existing hosts so we can remove any already installed nodes.
- hosts_to_run_on = list(oo_cfg.hosts)
+ hosts_to_run_on = list(oo_cfg.deployment.hosts)
# Check if master or nodes already have something installed
- installed_hosts = get_installed_hosts(oo_cfg.hosts, callback_facts)
+ installed_hosts = get_installed_hosts(oo_cfg.deployment.hosts, callback_facts)
if len(installed_hosts) > 0:
click.echo('Installed environment detected.')
# This check has to happen before we start removing hosts later in this method
@@ -668,11 +697,11 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose):
# present a message listing already installed hosts and remove hosts if needed
for host in installed_hosts:
- if host.master:
+ if host.is_master():
click.echo("{} is already an OpenShift Master".format(host))
# Masters stay in the list, we need to run against them when adding
# new nodes.
- elif host.node:
+ elif host.is_node():
click.echo("{} is already an OpenShift Node".format(host))
# force is only used for reinstalls so we don't want to remove
# anything.
@@ -699,11 +728,11 @@ def get_hosts_to_run_on(oo_cfg, callback_facts, unattended, force, verbose):
new_nodes = collect_new_nodes(oo_cfg)
hosts_to_run_on.extend(new_nodes)
- oo_cfg.hosts.extend(new_nodes)
+ oo_cfg.deployment.hosts.extend(new_nodes)
openshift_ansible.set_config(oo_cfg)
click.echo('Gathering information from hosts...')
- callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts, verbose)
+ callback_facts, error = openshift_ansible.default_facts(oo_cfg.deployment.hosts, verbose)
if error or callback_facts is None:
click.echo("There was a problem fetching the required information. See " \
"{} for details.".format(oo_cfg.settings['ansible_log_path']))
@@ -800,14 +829,14 @@ def uninstall(ctx):
oo_cfg = ctx.obj['oo_cfg']
verbose = ctx.obj['verbose']
- if len(oo_cfg.hosts) == 0:
+ if len(oo_cfg.deployment.hosts) == 0:
click.echo("No hosts defined in: %s" % oo_cfg.config_path)
sys.exit(1)
click.echo("OpenShift will be uninstalled from the following hosts:\n")
if not ctx.obj['unattended']:
# Prompt interactively to confirm:
- for host in oo_cfg.hosts:
+ for host in oo_cfg.deployment.hosts:
click.echo(" * %s" % host.connect_to)
proceed = click.confirm("\nDo you wish to proceed?")
if not proceed:
@@ -841,7 +870,7 @@ def upgrade(ctx, latest_minor, next_major):
},
}
- if len(oo_cfg.hosts) == 0:
+ if len(oo_cfg.deployment.hosts) == 0:
click.echo("No hosts defined in: %s" % oo_cfg.config_path)
sys.exit(1)
@@ -901,7 +930,7 @@ def upgrade(ctx, latest_minor, next_major):
click.echo("Openshift will be upgraded from %s %s to latest %s %s on the following hosts:\n" % (
variant, old_version, oo_cfg.settings['variant'], new_version))
- for host in oo_cfg.hosts:
+ for host in oo_cfg.deployment.hosts:
click.echo(" * %s" % host.connect_to)
if not ctx.obj['unattended']:
@@ -936,10 +965,11 @@ def install(ctx, force, gen_inventory):
check_hosts_config(oo_cfg, ctx.obj['unattended'])
- print_installation_summary(oo_cfg.hosts, oo_cfg.settings.get('variant_version', None))
+ print_installation_summary(oo_cfg.deployment.hosts, oo_cfg.settings.get('variant_version', None))
click.echo('Gathering information from hosts...')
- callback_facts, error = openshift_ansible.default_facts(oo_cfg.hosts,
+ callback_facts, error = openshift_ansible.default_facts(oo_cfg.deployment.hosts,
verbose)
+
if error or callback_facts is None:
click.echo("There was a problem fetching the required information. " \
"Please see {} for details.".format(oo_cfg.settings['ansible_log_path']))
@@ -954,11 +984,12 @@ def install(ctx, force, gen_inventory):
# TODO: if there are *new* nodes and this is a live install, we may need the user
# to confirm the settings for new nodes. Look into this once we're distinguishing
# between new and pre-existing nodes.
- if len(oo_cfg.calc_missing_facts()) > 0:
+ if not ctx.obj['unattended'] and len(oo_cfg.calc_missing_facts()) > 0:
confirm_hosts_facts(oo_cfg, callback_facts)
# Write quick installer config file to disk:
oo_cfg.save_to_disk()
+
# Write ansible inventory file to disk:
inventory_file = openshift_ansible.generate_inventory(hosts_to_run_on)
@@ -977,8 +1008,9 @@ If changes are needed please edit the config file above and re-run.
if not ctx.obj['unattended']:
confirm_continue(message)
- error = openshift_ansible.run_main_playbook(inventory_file, oo_cfg.hosts,
+ error = openshift_ansible.run_main_playbook(inventory_file, oo_cfg.deployment.hosts,
hosts_to_run_on, verbose)
+
if error:
# The bootstrap script will print out the log location.
message = """
diff --git a/utils/src/ooinstall/oo_config.py b/utils/src/ooinstall/oo_config.py
index 24dfbe013..fc06a0c4a 100644
--- a/utils/src/ooinstall/oo_config.py
+++ b/utils/src/ooinstall/oo_config.py
@@ -1,26 +1,32 @@
-# TODO: Temporarily disabled due to importing old code into openshift-ansible
-# repo. We will work on these over time.
# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,too-many-instance-attributes,too-few-public-methods
import os
+import sys
import yaml
from pkg_resources import resource_filename
-PERSIST_SETTINGS = [
+CONFIG_PERSIST_SETTINGS = [
'ansible_ssh_user',
+ 'ansible_callback_facts_yaml',
'ansible_config',
+ 'ansible_inventory_path',
'ansible_log_path',
- 'master_routingconfig_subdomain',
- 'proxy',
- 'proxy_exclude_hosts',
+ 'deployment',
+ 'version',
'variant',
'variant_version',
- 'version',
]
+
+DEPLOYMENT_PERSIST_SETTINGS = [
+ 'master_routingconfig_subdomain',
+ 'proxy_http',
+ 'proxy_https',
+ 'proxy_exclude_hosts',
+]
+
DEFAULT_REQUIRED_FACTS = ['ip', 'public_ip', 'hostname', 'public_hostname']
PRECONFIGURED_REQUIRED_FACTS = ['hostname', 'public_hostname']
-
class OOConfigFileError(Exception):
"""The provided config file path can't be read/written
"""
@@ -40,31 +46,19 @@ class Host(object):
self.public_ip = kwargs.get('public_ip', None)
self.public_hostname = kwargs.get('public_hostname', None)
self.connect_to = kwargs.get('connect_to', None)
+
self.preconfigured = kwargs.get('preconfigured', None)
+ self.schedulable = kwargs.get('schedulable', None)
self.new_host = kwargs.get('new_host', None)
-
- # Should this host run as an OpenShift master:
- self.master = kwargs.get('master', False)
-
- # Should this host run as an OpenShift node:
- self.node = kwargs.get('node', False)
-
- # Should this host run as an HAProxy:
- self.master_lb = kwargs.get('master_lb', False)
-
- # Should this host run as an HAProxy:
- self.storage = kwargs.get('storage', False)
-
self.containerized = kwargs.get('containerized', False)
+ self.node_labels = kwargs.get('node_labels', '')
- if self.connect_to is None:
- raise OOConfigInvalidHostError("You must specify either an ip " \
- "or hostname as 'connect_to'")
+ # allowable roles: master, node, etcd, storage, master_lb, new
+ self.roles = kwargs.get('roles', [])
- if self.master is False and self.node is False and \
- self.master_lb is False and self.storage is False:
+ if self.connect_to is None:
raise OOConfigInvalidHostError(
- "You must specify each host as either a master or a node.")
+ "You must specify either an ip or hostname as 'connect_to'")
def __str__(self):
return self.connect_to
@@ -75,41 +69,83 @@ class Host(object):
def to_dict(self):
""" Used when exporting to yaml. """
d = {}
- for prop in ['ip', 'hostname', 'public_ip', 'public_hostname',
- 'master', 'node', 'master_lb', 'storage', 'containerized',
- 'connect_to', 'preconfigured', 'new_host']:
+
+ for prop in ['ip', 'hostname', 'public_ip', 'public_hostname', 'connect_to',
+ 'preconfigured', 'containerized', 'schedulable', 'roles', 'node_labels']:
# If the property is defined (not None or False), export it:
if getattr(self, prop):
d[prop] = getattr(self, prop)
return d
+ def is_master(self):
+ return 'master' in self.roles
+
+ def is_node(self):
+ return 'node' in self.roles
+
+ def is_master_lb(self):
+ return 'master_lb' in self.roles
+
+ def is_storage(self):
+ return 'storage' in self.roles
+
+
def is_etcd_member(self, all_hosts):
""" Will this host be a member of a standalone etcd cluster. """
- if not self.master:
+ if not self.is_master():
return False
- masters = [host for host in all_hosts if host.master]
+ masters = [host for host in all_hosts if host.is_master()]
if len(masters) > 1:
return True
return False
def is_dedicated_node(self):
""" Will this host be a dedicated node. (not a master) """
- return self.node and not self.master
+ return self.is_node() and not self.is_master()
def is_schedulable_node(self, all_hosts):
""" Will this host be a node marked as schedulable. """
- if not self.node:
+ if not self.is_node():
return False
- if not self.master:
+ if not self.is_master():
return True
- masters = [host for host in all_hosts if host.master]
- nodes = [host for host in all_hosts if host.node]
+ masters = [host for host in all_hosts if host.is_master()]
+ nodes = [host for host in all_hosts if host.is_node()]
if len(masters) == len(nodes):
return True
return False
+class Role(object):
+ """ A role that will be applied to a host. """
+ def __init__(self, name, variables):
+ self.name = name
+ self.variables = variables
+
+ def __str__(self):
+ return self.name
+
+ def __repr__(self):
+ return self.name
+
+ def to_dict(self):
+ """ Used when exporting to yaml. """
+ d = {}
+ for prop in ['name', 'variables']:
+ # If the property is defined (not None or False), export it:
+ if getattr(self, prop):
+ d[prop] = getattr(self, prop)
+ return d
+
+
+class Deployment(object):
+ def __init__(self, **kwargs):
+ self.hosts = kwargs.get('hosts', [])
+ self.roles = kwargs.get('roles', {})
+ self.variables = kwargs.get('variables', {})
+
+
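A minimal sketch of the role-based Host API introduced above (addresses and role lists are illustrative):

    from ooinstall.oo_config import Host

    # Hosts now carry a list of role names instead of boolean flags.
    master = Host(connect_to='10.0.0.1', roles=['master', 'node'])
    worker = Host(connect_to='10.0.0.2', roles=['node'])

    print(master.is_master())                            # True
    print(worker.is_dedicated_node())                    # True
    print(worker.is_schedulable_node([master, worker]))  # True: not a master
    print(master.is_etcd_member([master, worker]))       # False: single master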
class OOConfig(object):
default_dir = os.path.normpath(
os.environ.get('XDG_CONFIG_HOME',
@@ -122,32 +158,51 @@ class OOConfig(object):
else:
self.config_path = os.path.normpath(self.default_dir +
self.default_file)
+ self.deployment = Deployment(hosts=[], roles={}, variables={})
self.settings = {}
self._read_config()
self._set_defaults()
+
def _read_config(self):
- self.hosts = []
try:
if os.path.exists(self.config_path):
- cfgfile = open(self.config_path, 'r')
- self.settings = yaml.safe_load(cfgfile.read())
- cfgfile.close()
+ with open(self.config_path, 'r') as cfgfile:
+ loaded_config = yaml.safe_load(cfgfile.read())
# Use the presence of a Description as an indicator this is
# a legacy config file:
if 'Description' in self.settings:
self._upgrade_legacy_config()
+ try:
+ host_list = loaded_config['deployment']['hosts']
+ role_list = loaded_config['deployment']['roles']
+ except KeyError as e:
+ print "Error loading config, no such key: {}".format(e)
+ sys.exit(0)
+
+ for setting in CONFIG_PERSIST_SETTINGS:
+ try:
+ self.settings[setting] = str(loaded_config[setting])
+ except KeyError:
+ continue
+
+ for setting in DEPLOYMENT_PERSIST_SETTINGS:
+ try:
+ self.deployment.variables[setting] = \
+ str(loaded_config['deployment'][setting])
+ except KeyError:
+ continue
+
# Parse the hosts into DTO objects:
- if 'hosts' in self.settings:
- for host in self.settings['hosts']:
- self.hosts.append(Host(**host))
+ for host in host_list:
+ self.deployment.hosts.append(Host(**host))
+
+ # Parse the roles into Objects
+ for name, variables in role_list.iteritems():
+ self.deployment.roles.update({name: Role(name, variables)})
- # Watchout for the variant_version coming in as a float:
- if 'variant_version' in self.settings:
- self.settings['variant_version'] = \
- str(self.settings['variant_version'])
except IOError, ferr:
raise OOConfigFileError('Cannot open config file "{}": {}'.format(ferr.filename,
@@ -179,18 +234,21 @@ class OOConfig(object):
self.settings['variant'] = 'openshift-enterprise'
self.settings['variant_version'] = '3.0'
+ def _upgrade_v1_config(self):
+        # TODO: write code to upgrade old config
+ return
+
def _set_defaults(self):
if 'ansible_inventory_directory' not in self.settings:
- self.settings['ansible_inventory_directory'] = \
- self._default_ansible_inv_dir()
+ self.settings['ansible_inventory_directory'] = self._default_ansible_inv_dir()
if not os.path.exists(self.settings['ansible_inventory_directory']):
os.makedirs(self.settings['ansible_inventory_directory'])
if 'ansible_plugins_directory' not in self.settings:
self.settings['ansible_plugins_directory'] = \
resource_filename(__name__, 'ansible_plugins')
if 'version' not in self.settings:
- self.settings['version'] = 'v1'
+ self.settings['version'] = 'v2'
if 'ansible_callback_facts_yaml' not in self.settings:
self.settings['ansible_callback_facts_yaml'] = '%s/callback_facts.yaml' % \
@@ -219,7 +277,7 @@ class OOConfig(object):
"""
result = {}
- for host in self.hosts:
+ for host in self.deployment.hosts:
missing_facts = []
if host.preconfigured:
required_facts = PRECONFIGURED_REQUIRED_FACTS
@@ -240,17 +298,36 @@ class OOConfig(object):
def persist_settings(self):
p_settings = {}
- for setting in PERSIST_SETTINGS:
+
+ for setting in CONFIG_PERSIST_SETTINGS:
if setting in self.settings and self.settings[setting]:
p_settings[setting] = self.settings[setting]
- p_settings['hosts'] = []
- for host in self.hosts:
- p_settings['hosts'].append(host.to_dict())
- if self.settings['ansible_inventory_directory'] != \
- self._default_ansible_inv_dir():
- p_settings['ansible_inventory_directory'] = \
- self.settings['ansible_inventory_directory']
+
+ p_settings['deployment'] = {}
+ p_settings['deployment']['hosts'] = []
+ p_settings['deployment']['roles'] = {}
+
+ for setting in DEPLOYMENT_PERSIST_SETTINGS:
+ if setting in self.deployment.variables:
+ p_settings['deployment'][setting] = self.deployment.variables[setting]
+
+ for host in self.deployment.hosts:
+ p_settings['deployment']['hosts'].append(host.to_dict())
+
+ for name, role in self.deployment.roles.iteritems():
+ p_settings['deployment']['roles'][name] = role.variables
+
+ try:
+ p_settings['variant'] = self.settings['variant']
+ p_settings['variant_version'] = self.settings['variant_version']
+
+ if self.settings['ansible_inventory_directory'] != self._default_ansible_inv_dir():
+ p_settings['ansible_inventory_directory'] = self.settings['ansible_inventory_directory']
+ except KeyError as e:
+ print "Error persisting settings: {}".format(e)
+ sys.exit(0)
+
return p_settings
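For reference, a sketch of the v2 structure persist_settings now returns; the concrete values are illustrative only, and the keys follow the CONFIG_PERSIST_SETTINGS and DEPLOYMENT_PERSIST_SETTINGS lists above:

    import yaml

    p_settings = {
        'version': 'v2',
        'variant': 'openshift-enterprise',
        'variant_version': '3.2',
        'ansible_ssh_user': 'root',
        'deployment': {
            'master_routingconfig_subdomain': 'apps.example.com',
            'hosts': [
                {'connect_to': '10.0.0.1', 'roles': ['master', 'node']},
            ],
            'roles': {'master': {}, 'node': {}},
        },
    }
    print(yaml.safe_dump(p_settings, default_flow_style=False))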
@@ -261,7 +338,7 @@ class OOConfig(object):
return self.yaml()
def get_host(self, name):
- for host in self.hosts:
+ for host in self.deployment.hosts:
if host.connect_to == name:
return host
return None
diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py
index 97aee0b53..352955026 100644
--- a/utils/src/ooinstall/openshift_ansible.py
+++ b/utils/src/ooinstall/openshift_ansible.py
@@ -11,15 +11,32 @@ from ooinstall.variants import find_variant
CFG = None
+ROLES_TO_GROUPS_MAP = {
+ 'master': 'masters',
+ 'node': 'nodes',
+ 'storage': 'nfs'
+}
+
+VARIABLES_MAP = {
+ 'ansible_ssh_user': 'ansible_ssh_user',
+ 'ansible_config': 'ansible_config',
+ 'ansible_log_path': 'ansible_log_path',
+ 'deployment_type': 'deployment_type',
+    'master_routingconfig_subdomain': 'openshift_master_default_subdomain',
+    'proxy_http': 'openshift_http_proxy',
+ 'proxy_https': 'openshift_https_proxy',
+ 'proxy_exclude_hosts': 'openshift_no_proxy',
+}
+
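A short sketch of how these two maps are meant to be applied when emitting inventory lines; example_inventory_lines is a hypothetical helper, and the proxy value is illustrative:

    from ooinstall.openshift_ansible import ROLES_TO_GROUPS_MAP, VARIABLES_MAP

    def example_inventory_lines(roles, variables):
        # Translate deployment roles/variables into inventory-style lines.
        lines = ['[OSEv3:children]']
        for role in roles:
            lines.append(ROLES_TO_GROUPS_MAP.get(role, role))
        lines.append('')
        lines.append('[OSEv3:vars]')
        for name, value in sorted(variables.items()):
            lines.append('{}={}'.format(VARIABLES_MAP.get(name, name), value))
        return '\n'.join(lines)

    print(example_inventory_lines(
        ['master', 'node', 'storage'],
        {'proxy_http': 'http://proxy.example.com:3128'}))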
def set_config(cfg):
global CFG
CFG = cfg
def generate_inventory(hosts):
global CFG
- masters = [host for host in hosts if host.master]
- nodes = [host for host in hosts if host.node]
- new_nodes = [host for host in hosts if host.node and host.new_host]
+ masters = [host for host in hosts if host.is_master()]
+ nodes = [host for host in hosts if host.is_node()]
+ new_nodes = [host for host in hosts if host.is_node() and host.new_host]
proxy = determine_proxy_configuration(hosts)
storage = determine_storage_configuration(hosts)
multiple_masters = len(masters) > 1
@@ -28,7 +45,7 @@ def generate_inventory(hosts):
base_inventory_path = CFG.settings['ansible_inventory_path']
base_inventory = open(base_inventory_path, 'w')
- write_inventory_children(base_inventory, multiple_masters, proxy, storage, scaleup)
+ write_inventory_children(base_inventory, multiple_masters, proxy, scaleup)
write_inventory_vars(base_inventory, multiple_masters, proxy)
@@ -66,7 +83,7 @@ def generate_inventory(hosts):
schedulable = None
# If the node is also a master, we must explicitly set schedulablity:
- if node.master:
+ if node.is_master():
schedulable = node.is_schedulable_node(hosts)
write_host(node, base_inventory, schedulable)
@@ -88,7 +105,7 @@ def generate_inventory(hosts):
return base_inventory_path
def determine_proxy_configuration(hosts):
- proxy = next((host for host in hosts if host.master_lb), None)
+ proxy = next((host for host in hosts if host.is_master_lb()), None)
if proxy:
if proxy.hostname == None:
proxy.hostname = proxy.connect_to
@@ -97,53 +114,81 @@ def determine_proxy_configuration(hosts):
return proxy
def determine_storage_configuration(hosts):
- storage = next((host for host in hosts if host.storage), None)
+ storage = next((host for host in hosts if host.is_storage()), None)
return storage
-def write_inventory_children(base_inventory, multiple_masters, proxy, storage, scaleup):
+def write_inventory_children(base_inventory, multiple_masters, proxy, scaleup):
global CFG
base_inventory.write('\n[OSEv3:children]\n')
- base_inventory.write('masters\n')
- base_inventory.write('nodes\n')
+ for role in CFG.deployment.roles:
+ child = ROLES_TO_GROUPS_MAP.get(role, role)
+ base_inventory.write('{}\n'.format(child))
+
if scaleup:
base_inventory.write('new_nodes\n')
if multiple_masters:
base_inventory.write('etcd\n')
if not getattr(proxy, 'preconfigured', True):
base_inventory.write('lb\n')
- if storage:
- base_inventory.write('nfs\n')
def write_inventory_vars(base_inventory, multiple_masters, proxy):
global CFG
base_inventory.write('\n[OSEv3:vars]\n')
- base_inventory.write('ansible_ssh_user={}\n'.format(CFG.settings['ansible_ssh_user']))
+
+ for variable, value in CFG.settings.iteritems():
+ inventory_var = VARIABLES_MAP.get(variable, None)
+ if inventory_var and value:
+ base_inventory.write('{}={}\n'.format(inventory_var, value))
+
+ for variable, value in CFG.deployment.variables.iteritems():
+ inventory_var = VARIABLES_MAP.get(variable, variable)
+ if value:
+ base_inventory.write('{}={}\n'.format(inventory_var, value))
+
if CFG.settings['ansible_ssh_user'] != 'root':
base_inventory.write('ansible_become=yes\n')
+
if multiple_masters and proxy is not None:
base_inventory.write('openshift_master_cluster_method=native\n')
base_inventory.write("openshift_master_cluster_hostname={}\n".format(proxy.hostname))
base_inventory.write(
"openshift_master_cluster_public_hostname={}\n".format(proxy.public_hostname))
- if CFG.settings.get('master_routingconfig_subdomain', False):
- base_inventory.write(
- "openshift_master_default_subdomain={}\n".format(
- CFG.settings['master_routingconfig_subdomain']))
+
if CFG.settings.get('variant_version', None) == '3.1':
#base_inventory.write('openshift_image_tag=v{}\n'.format(CFG.settings.get('variant_version')))
base_inventory.write('openshift_image_tag=v{}\n'.format('3.1.1.6'))
- if CFG.settings.get('openshift_http_proxy', ''):
+ write_proxy_settings(base_inventory)
+
+ for name, role_obj in CFG.deployment.roles.iteritems():
+ if role_obj.variables:
+ group_name = ROLES_TO_GROUPS_MAP.get(name, name)
+ base_inventory.write("\n[{}:vars]\n".format(group_name))
+ for variable, value in role_obj.variables.iteritems():
+ inventory_var = VARIABLES_MAP.get(variable, variable)
+ if value:
+ base_inventory.write('{}={}\n'.format(inventory_var, value))
+ base_inventory.write("\n")
+
+
+def write_proxy_settings(base_inventory):
+ try:
base_inventory.write("openshift_http_proxy={}\n".format(
CFG.settings['openshift_http_proxy']))
- if CFG.settings.get('openshift_https_proxy', ''):
+ except KeyError:
+ pass
+ try:
base_inventory.write("openshift_https_proxy={}\n".format(
CFG.settings['openshift_https_proxy']))
- if CFG.settings.get('openshift_no_proxy', ''):
+ except KeyError:
+ pass
+ try:
base_inventory.write("openshift_no_proxy={}\n".format(
CFG.settings['openshift_no_proxy']))
+ except KeyError:
+ pass
def write_host(host, inventory, schedulable=None):
@@ -160,8 +205,6 @@ def write_host(host, inventory, schedulable=None):
facts += ' openshift_public_hostname={}'.format(host.public_hostname)
if host.containerized:
facts += ' containerized={}'.format(host.containerized)
- # TODO: For not write_host is handles both master and nodes.
- # Technically only nodes will ever need this.
# Distinguish between three states, no schedulability specified (use default),
# explicitly set to True, or explicitly set to False:
diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py
index 66ed66660..13973f22f 100644
--- a/utils/test/cli_installer_tests.py
+++ b/utils/test/cli_installer_tests.py
@@ -76,6 +76,14 @@ MOCK_FACTS_QUICKHA = {
'common': {
'ip': '10.0.0.4',
'public_ip': '10.0.0.4',
+ 'hostname': 'node3-private.example.com',
+ 'public_hostname': 'node3.example.com'
+ }
+ },
+ '10.0.0.5': {
+ 'common': {
+ 'ip': '10.0.0.5',
+ 'public_ip': '10.0.0.5',
'hostname': 'proxy-private.example.com',
'public_hostname': 'proxy.example.com'
}
@@ -94,221 +102,279 @@ MOCK_FACTS_QUICKHA = {
BAD_CONFIG = """
variant: %s
ansible_ssh_user: root
-hosts:
- - connect_to: 10.0.0.1
- ip: 10.0.0.1
- hostname: master-private.example.com
- public_ip: 24.222.0.1
- public_hostname: master.example.com
- master: true
- node: true
- - ip: 10.0.0.2
- hostname: node1-private.example.com
- public_ip: 24.222.0.2
- public_hostname: node1.example.com
- node: true
- - connect_to: 10.0.0.3
- ip: 10.0.0.3
- hostname: node2-private.example.com
- public_ip: 24.222.0.3
- public_hostname: node2.example.com
- node: true
+deployment:
+ hosts:
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ roles:
+ - master
+ - node
+ - ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ roles:
+ - node
+ - connect_to: 10.0.0.3
+ ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ roles:
+ - node
+ roles:
+ master:
+ node:
"""
QUICKHA_CONFIG = """
variant: %s
ansible_ssh_user: root
-master_routingconfig_subdomain: example.com
-hosts:
- - connect_to: 10.0.0.1
- ip: 10.0.0.1
- hostname: master-private.example.com
- public_ip: 24.222.0.1
- public_hostname: master.example.com
- master: true
- node: true
- - connect_to: 10.0.0.2
- ip: 10.0.0.2
- hostname: node1-private.example.com
- public_ip: 24.222.0.2
- public_hostname: node1.example.com
- master: true
- node: true
- - connect_to: 10.0.0.3
- ip: 10.0.0.3
- hostname: node2-private.example.com
- public_ip: 24.222.0.3
- public_hostname: node2.example.com
- node: true
- master: true
- - connect_to: 10.0.0.4
- ip: 10.0.0.4
- hostname: node3-private.example.com
- public_ip: 24.222.0.4
- public_hostname: node3.example.com
- node: true
- - connect_to: 10.0.0.5
- ip: 10.0.0.5
- hostname: proxy-private.example.com
- public_ip: 24.222.0.5
- public_hostname: proxy.example.com
- master_lb: true
- - connect_to: 10.1.0.1
- ip: 10.1.0.1
- hostname: storage-private.example.com
- public_ip: 24.222.0.6
- public_hostname: storage.example.com
- storage: true
+deployment:
+ hosts:
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.0.0.2
+ ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.0.0.3
+ ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.0.0.4
+ ip: 10.0.0.4
+ hostname: node3-private.example.com
+ public_ip: 24.222.0.4
+ public_hostname: node3.example.com
+ roles:
+ - node
+ - connect_to: 10.0.0.5
+ ip: 10.0.0.5
+ hostname: proxy-private.example.com
+ public_ip: 24.222.0.5
+ public_hostname: proxy.example.com
+ roles:
+ - master_lb
+ - connect_to: 10.1.0.1
+ ip: 10.1.0.1
+ hostname: storage-private.example.com
+ public_ip: 24.222.0.6
+ public_hostname: storage.example.com
+ roles:
+ - storage
+ roles:
+ master:
+ master_lb:
+ node:
+ storage:
"""
QUICKHA_2_MASTER_CONFIG = """
variant: %s
ansible_ssh_user: root
-hosts:
- - connect_to: 10.0.0.1
- ip: 10.0.0.1
- hostname: master-private.example.com
- public_ip: 24.222.0.1
- public_hostname: master.example.com
- master: true
- node: true
- - connect_to: 10.0.0.2
- ip: 10.0.0.2
- hostname: node1-private.example.com
- public_ip: 24.222.0.2
- public_hostname: node1.example.com
- master: true
- node: true
- - connect_to: 10.0.0.4
- ip: 10.0.0.4
- hostname: node3-private.example.com
- public_ip: 24.222.0.4
- public_hostname: node3.example.com
- node: true
- - connect_to: 10.0.0.5
- ip: 10.0.0.5
- hostname: proxy-private.example.com
- public_ip: 24.222.0.5
- public_hostname: proxy.example.com
- master_lb: true
- - connect_to: 10.1.0.1
- ip: 10.1.0.1
- hostname: storage-private.example.com
- public_ip: 24.222.0.6
- public_hostname: storage.example.com
- storage: true
+deployment:
+ hosts:
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.0.0.2
+ ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.0.0.4
+ ip: 10.0.0.4
+ hostname: node3-private.example.com
+ public_ip: 24.222.0.4
+ public_hostname: node3.example.com
+ roles:
+ - node
+ - connect_to: 10.0.0.5
+ ip: 10.0.0.5
+ hostname: proxy-private.example.com
+ public_ip: 24.222.0.5
+ public_hostname: proxy.example.com
+ roles:
+ - master_lb
+ - connect_to: 10.1.0.1
+ ip: 10.1.0.1
+ hostname: storage-private.example.com
+ public_ip: 24.222.0.6
+ public_hostname: storage.example.com
+ roles:
+ - storage
+ roles:
+ master:
+ master_lb:
+ node:
+ storage:
"""
QUICKHA_CONFIG_REUSED_LB = """
variant: %s
ansible_ssh_user: root
-hosts:
- - connect_to: 10.0.0.1
- ip: 10.0.0.1
- hostname: master-private.example.com
- public_ip: 24.222.0.1
- public_hostname: master.example.com
- master: true
- node: true
- - connect_to: 10.0.0.2
- ip: 10.0.0.2
- hostname: node1-private.example.com
- public_ip: 24.222.0.2
- public_hostname: node1.example.com
- master: true
- node: true
- master_lb: true
- - connect_to: 10.0.0.3
- ip: 10.0.0.3
- hostname: node2-private.example.com
- public_ip: 24.222.0.3
- public_hostname: node2.example.com
- node: true
- master: true
- - connect_to: 10.1.0.1
- ip: 10.1.0.1
- hostname: storage-private.example.com
- public_ip: 24.222.0.6
- public_hostname: storage.example.com
- storage: true
+deployment:
+ hosts:
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.0.0.2
+ ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ roles:
+ - master
+ - node
+ - master_lb
+ - connect_to: 10.0.0.3
+ ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.1.0.1
+ ip: 10.1.0.1
+ hostname: storage-private.example.com
+ public_ip: 24.222.0.6
+ public_hostname: storage.example.com
+ roles:
+ - storage
+ roles:
+ master:
+ node:
+ storage:
"""
QUICKHA_CONFIG_NO_LB = """
variant: %s
ansible_ssh_user: root
-hosts:
- - connect_to: 10.0.0.1
- ip: 10.0.0.1
- hostname: master-private.example.com
- public_ip: 24.222.0.1
- public_hostname: master.example.com
- master: true
- node: true
- - connect_to: 10.0.0.2
- ip: 10.0.0.2
- hostname: node1-private.example.com
- public_ip: 24.222.0.2
- public_hostname: node1.example.com
- master: true
- node: true
- - connect_to: 10.0.0.3
- ip: 10.0.0.3
- hostname: node2-private.example.com
- public_ip: 24.222.0.3
- public_hostname: node2.example.com
- node: true
- master: true
- - connect_to: 10.1.0.1
- ip: 10.1.0.1
- hostname: storage-private.example.com
- public_ip: 24.222.0.6
- public_hostname: storage.example.com
- storage: true
+deployment:
+ hosts:
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.0.0.2
+ ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.0.0.3
+ ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.1.0.1
+ ip: 10.1.0.1
+ hostname: storage-private.example.com
+ public_ip: 24.222.0.6
+ public_hostname: storage.example.com
+ roles:
+ - storage
+ roles:
+ master:
+ node:
+ storage:
"""
QUICKHA_CONFIG_PRECONFIGURED_LB = """
variant: %s
ansible_ssh_user: root
-master_routingconfig_subdomain: example.com
-hosts:
- - connect_to: 10.0.0.1
- ip: 10.0.0.1
- hostname: master-private.example.com
- public_ip: 24.222.0.1
- public_hostname: master.example.com
- master: true
- node: true
- - connect_to: 10.0.0.2
- ip: 10.0.0.2
- hostname: node1-private.example.com
- public_ip: 24.222.0.2
- public_hostname: node1.example.com
- master: true
- node: true
- - connect_to: 10.0.0.3
- ip: 10.0.0.3
- hostname: node2-private.example.com
- public_ip: 24.222.0.3
- public_hostname: node2.example.com
- node: true
- master: true
- - connect_to: 10.0.0.4
- ip: 10.0.0.4
- hostname: node3-private.example.com
- public_ip: 24.222.0.4
- public_hostname: node3.example.com
- node: true
- - connect_to: proxy-private.example.com
- hostname: proxy-private.example.com
- public_hostname: proxy.example.com
- master_lb: true
- preconfigured: true
- - connect_to: 10.1.0.1
- ip: 10.1.0.1
- hostname: storage-private.example.com
- public_ip: 24.222.0.6
- public_hostname: storage.example.com
- storage: true
+deployment:
+ hosts:
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.0.0.2
+ ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.0.0.3
+ ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.0.0.4
+ ip: 10.0.0.4
+ hostname: node3-private.example.com
+ public_ip: 24.222.0.4
+ public_hostname: node3.example.com
+ roles:
+ - node
+ - connect_to: proxy-private.example.com
+ hostname: proxy-private.example.com
+ public_hostname: proxy.example.com
+ preconfigured: true
+ roles:
+ - master_lb
+ - connect_to: 10.1.0.1
+ ip: 10.1.0.1
+ hostname: storage-private.example.com
+ public_ip: 24.222.0.6
+ public_hostname: storage.example.com
+ roles:
+ - storage
+ roles:
+ master:
+ master_lb:
+ node:
+ storage:
"""
class UnattendedCliTests(OOCliFixture):
@@ -439,10 +505,7 @@ class UnattendedCliTests(OOCliFixture):
@patch('ooinstall.openshift_ansible.run_main_playbook')
@patch('ooinstall.openshift_ansible.load_system_facts')
def test_inventory_write(self, load_facts_mock, run_playbook_mock):
-
- # Add an ssh user so we can verify it makes it to the inventory file:
- merged_config = "%s\n%s" % (SAMPLE_CONFIG % 'openshift-enterprise',
- "ansible_ssh_user: bob")
+ merged_config = SAMPLE_CONFIG % 'openshift-enterprise'
load_facts_mock.return_value = (MOCK_FACTS, 0)
run_playbook_mock.return_value = 0
@@ -456,7 +519,7 @@ class UnattendedCliTests(OOCliFixture):
# Check the inventory file looks as we would expect:
inventory = ConfigParser.ConfigParser(allow_no_value=True)
inventory.read(os.path.join(self.work_dir, 'hosts'))
- self.assertEquals('bob',
+ self.assertEquals('root',
inventory.get('OSEv3:vars', 'ansible_ssh_user'))
self.assertEquals('openshift-enterprise',
inventory.get('OSEv3:vars', 'deployment_type'))
diff --git a/utils/test/fixture.py b/utils/test/fixture.py
index e01eaebaf..006df739b 100644
--- a/utils/test/fixture.py
+++ b/utils/test/fixture.py
@@ -10,28 +10,36 @@ from click.testing import CliRunner
# Substitute in a product name before use:
SAMPLE_CONFIG = """
variant: %s
+variant_version: 3.2
ansible_ssh_user: root
master_routingconfig_subdomain: example.com
-hosts:
- - connect_to: 10.0.0.1
- ip: 10.0.0.1
- hostname: master-private.example.com
- public_ip: 24.222.0.1
- public_hostname: master.example.com
- master: true
- node: true
- - connect_to: 10.0.0.2
- ip: 10.0.0.2
- hostname: node1-private.example.com
- public_ip: 24.222.0.2
- public_hostname: node1.example.com
- node: true
- - connect_to: 10.0.0.3
- ip: 10.0.0.3
- hostname: node2-private.example.com
- public_ip: 24.222.0.3
- public_hostname: node2.example.com
- node: true
+deployment:
+ hosts:
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ roles:
+ - master
+ - node
+ - connect_to: 10.0.0.2
+ ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ roles:
+ - node
+ - connect_to: 10.0.0.3
+ ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ roles:
+ - node
+ roles:
+ master:
+ node:
"""
def read_yaml(config_file_path):
@@ -87,12 +95,13 @@ class OOCliFixture(OOInstallFixture):
self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on))
def _verify_config_hosts(self, written_config, host_count):
- self.assertEquals(host_count, len(written_config['hosts']))
- for host in written_config['hosts']:
+ self.assertEquals(host_count, len(written_config['deployment']['hosts']))
+ for host in written_config['deployment']['hosts']:
self.assertTrue('hostname' in host)
self.assertTrue('public_hostname' in host)
if 'preconfigured' not in host:
- self.assertTrue('node' in host or 'storage' in host)
+ if 'roles' in host:
+ self.assertTrue('node' in host['roles'] or 'storage' in host['roles'])
self.assertTrue('ip' in host)
self.assertTrue('public_ip' in host)
diff --git a/utils/test/oo_config_tests.py b/utils/test/oo_config_tests.py
index 9f5f8e244..c19fe9e0d 100644
--- a/utils/test/oo_config_tests.py
+++ b/utils/test/oo_config_tests.py
@@ -12,27 +12,35 @@ from ooinstall.oo_config import OOConfig, Host, OOConfigInvalidHostError
SAMPLE_CONFIG = """
variant: openshift-enterprise
+variant_version: 3.2
ansible_ssh_user: root
-hosts:
- - connect_to: master-private.example.com
- ip: 10.0.0.1
- hostname: master-private.example.com
- public_ip: 24.222.0.1
- public_hostname: master.example.com
- master: true
- node: true
- - connect_to: node1-private.example.com
- ip: 10.0.0.2
- hostname: node1-private.example.com
- public_ip: 24.222.0.2
- public_hostname: node1.example.com
- node: true
- - connect_to: node2-private.example.com
- ip: 10.0.0.3
- hostname: node2-private.example.com
- public_ip: 24.222.0.3
- public_hostname: node2.example.com
- node: true
+deployment:
+ hosts:
+ - connect_to: master-private.example.com
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ roles:
+ - master
+ - node
+ - connect_to: node1-private.example.com
+ ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ roles:
+ - node
+ - connect_to: node2-private.example.com
+ ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ roles:
+ - node
+ roles:
+ master:
+ node:
"""
# Used to test automatic upgrading of config:
@@ -56,45 +64,59 @@ validated_facts:
CONFIG_INCOMPLETE_FACTS = """
-hosts:
- - connect_to: 10.0.0.1
- ip: 10.0.0.1
- hostname: master-private.example.com
- public_ip: 24.222.0.1
- public_hostname: master.example.com
- master: true
- - connect_to: 10.0.0.2
- ip: 10.0.0.2
- hostname: 24.222.0.2
- public_ip: 24.222.0.2
- node: true
- - connect_to: 10.0.0.3
- ip: 10.0.0.3
- node: true
+deployment:
+ hosts:
+ - connect_to: 10.0.0.1
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ roles:
+ - master
+ - connect_to: 10.0.0.2
+ ip: 10.0.0.2
+ hostname: 24.222.0.2
+ public_ip: 24.222.0.2
+ roles:
+ - node
+ - connect_to: 10.0.0.3
+ ip: 10.0.0.3
+ roles:
+ - node
+ roles:
+ master:
+ node:
"""
CONFIG_BAD = """
variant: openshift-enterprise
ansible_ssh_user: root
-hosts:
- - connect_to: master-private.example.com
- ip: 10.0.0.1
- hostname: master-private.example.com
- public_ip: 24.222.0.1
- public_hostname: master.example.com
- master: true
- node: true
- - ip: 10.0.0.2
- hostname: node1-private.example.com
- public_ip: 24.222.0.2
- public_hostname: node1.example.com
- node: true
- - connect_to: node2-private.example.com
- ip: 10.0.0.3
- hostname: node2-private.example.com
- public_ip: 24.222.0.3
- public_hostname: node2.example.com
- node: true
+deployment:
+ hosts:
+ - connect_to: master-private.example.com
+ ip: 10.0.0.1
+ hostname: master-private.example.com
+ public_ip: 24.222.0.1
+ public_hostname: master.example.com
+ roles:
+ - master
+ - node
+ - ip: 10.0.0.2
+ hostname: node1-private.example.com
+ public_ip: 24.222.0.2
+ public_hostname: node1.example.com
+ roles:
+ - node
+ - connect_to: node2-private.example.com
+ ip: 10.0.0.3
+ hostname: node2-private.example.com
+ public_ip: 24.222.0.3
+ public_hostname: node2.example.com
+ roles:
+ - node
+ roles:
+ master:
+ node:
"""
class OOInstallFixture(unittest.TestCase):
@@ -123,47 +145,6 @@ class OOInstallFixture(unittest.TestCase):
return path
-class LegacyOOConfigTests(OOInstallFixture):
-
- def setUp(self):
- OOInstallFixture.setUp(self)
- self.cfg_path = self.write_config(os.path.join(self.work_dir,
- 'ooinstall.conf'), LEGACY_CONFIG)
- self.cfg = OOConfig(self.cfg_path)
-
- def test_load_config_memory(self):
- self.assertEquals('openshift-enterprise', self.cfg.settings['variant'])
- self.assertEquals('3.0', self.cfg.settings['variant_version'])
- self.assertEquals('v1', self.cfg.settings['version'])
-
- self.assertEquals(3, len(self.cfg.hosts))
- h1 = self.cfg.get_host('10.0.0.1')
- self.assertEquals('10.0.0.1', h1.ip)
- self.assertEquals('24.222.0.1', h1.public_ip)
- self.assertEquals('master-private.example.com', h1.hostname)
- self.assertEquals('master.example.com', h1.public_hostname)
-
- h2 = self.cfg.get_host('10.0.0.2')
- self.assertEquals('10.0.0.2', h2.ip)
- self.assertEquals('24.222.0.2', h2.public_ip)
- self.assertEquals('node1-private.example.com', h2.hostname)
- self.assertEquals('node1.example.com', h2.public_hostname)
-
- h3 = self.cfg.get_host('10.0.0.3')
- self.assertEquals('10.0.0.3', h3.ip)
- self.assertEquals('24.222.0.3', h3.public_ip)
- self.assertEquals('node2-private.example.com', h3.hostname)
- self.assertEquals('node2.example.com', h3.public_hostname)
-
- self.assertFalse('masters' in self.cfg.settings)
- self.assertFalse('nodes' in self.cfg.settings)
- self.assertFalse('Description' in self.cfg.settings)
- self.assertFalse('Name' in self.cfg.settings)
- self.assertFalse('Subscription' in self.cfg.settings)
- self.assertFalse('Vendor' in self.cfg.settings)
- self.assertFalse('Version' in self.cfg.settings)
- self.assertFalse('validates_facts' in self.cfg.settings)
-
class OOConfigTests(OOInstallFixture):
@@ -173,16 +154,16 @@ class OOConfigTests(OOInstallFixture):
'ooinstall.conf'), SAMPLE_CONFIG)
ooconfig = OOConfig(cfg_path)
- self.assertEquals(3, len(ooconfig.hosts))
- self.assertEquals("master-private.example.com", ooconfig.hosts[0].connect_to)
- self.assertEquals("10.0.0.1", ooconfig.hosts[0].ip)
- self.assertEquals("master-private.example.com", ooconfig.hosts[0].hostname)
+ self.assertEquals(3, len(ooconfig.deployment.hosts))
+ self.assertEquals("master-private.example.com", ooconfig.deployment.hosts[0].connect_to)
+ self.assertEquals("10.0.0.1", ooconfig.deployment.hosts[0].ip)
+ self.assertEquals("master-private.example.com", ooconfig.deployment.hosts[0].hostname)
self.assertEquals(["10.0.0.1", "10.0.0.2", "10.0.0.3"],
- [host['ip'] for host in ooconfig.settings['hosts']])
+ [host.ip for host in ooconfig.deployment.hosts])
self.assertEquals('openshift-enterprise', ooconfig.settings['variant'])
- self.assertEquals('v1', ooconfig.settings['version'])
+ self.assertEquals('v2', ooconfig.settings['version'])
def test_load_bad_config(self):
@@ -222,8 +203,10 @@ class OOConfigTests(OOInstallFixture):
written_config = yaml.safe_load(f.read())
f.close()
- self.assertEquals(3, len(written_config['hosts']))
- for h in written_config['hosts']:
+
+
+ self.assertEquals(3, len(written_config['deployment']['hosts']))
+ for h in written_config['deployment']['hosts']:
self.assertTrue('ip' in h)
self.assertTrue('public_ip' in h)
self.assertTrue('hostname' in h)
@@ -231,7 +214,7 @@ class OOConfigTests(OOInstallFixture):
self.assertTrue('ansible_ssh_user' in written_config)
self.assertTrue('variant' in written_config)
- self.assertEquals('v1', written_config['version'])
+ self.assertEquals('v2', written_config['version'])
# Some advanced settings should not get written out if they
# were not specified by the user:
@@ -256,7 +239,3 @@ class HostTests(OOInstallFixture):
'public_hostname': 'a.example.com',
}
self.assertRaises(OOConfigInvalidHostError, Host, **yaml_props)
-
-
-
-