Diffstat (limited to 'roles')
-rw-r--r--  roles/docker/tasks/systemcontainer_docker.yml | 6
-rw-r--r--  roles/lib_openshift/library/oc_adm_ca_server_cert.py | 25
-rw-r--r--  roles/lib_openshift/library/oc_adm_manage_node.py | 25
-rw-r--r--  roles/lib_openshift/library/oc_adm_policy_group.py | 25
-rw-r--r--  roles/lib_openshift/library/oc_adm_policy_user.py | 25
-rw-r--r--  roles/lib_openshift/library/oc_adm_registry.py | 95
-rw-r--r--  roles/lib_openshift/library/oc_adm_router.py | 95
-rw-r--r--  roles/lib_openshift/library/oc_atomic_container.py | 8
-rw-r--r--  roles/lib_openshift/library/oc_clusterrole.py | 25
-rw-r--r--  roles/lib_openshift/library/oc_configmap.py | 25
-rw-r--r--  roles/lib_openshift/library/oc_edit.py | 25
-rw-r--r--  roles/lib_openshift/library/oc_env.py | 25
-rw-r--r--  roles/lib_openshift/library/oc_group.py | 25
-rw-r--r--  roles/lib_openshift/library/oc_image.py | 25
-rw-r--r--  roles/lib_openshift/library/oc_label.py | 25
-rw-r--r--  roles/lib_openshift/library/oc_obj.py | 25
-rw-r--r--  roles/lib_openshift/library/oc_objectvalidator.py | 25
-rw-r--r--  roles/lib_openshift/library/oc_process.py | 25
-rw-r--r--  roles/lib_openshift/library/oc_project.py | 25
-rw-r--r--  roles/lib_openshift/library/oc_pvc.py | 25
-rw-r--r--  roles/lib_openshift/library/oc_route.py | 25
-rw-r--r--  roles/lib_openshift/library/oc_scale.py | 25
-rw-r--r--  roles/lib_openshift/library/oc_secret.py | 44
-rw-r--r--  roles/lib_openshift/library/oc_service.py | 99
-rw-r--r--  roles/lib_openshift/library/oc_serviceaccount.py | 25
-rw-r--r--  roles/lib_openshift/library/oc_serviceaccount_secret.py | 25
-rw-r--r--  roles/lib_openshift/library/oc_user.py | 25
-rw-r--r--  roles/lib_openshift/library/oc_version.py | 25
-rw-r--r--  roles/lib_openshift/library/oc_volume.py | 25
-rw-r--r--  roles/lib_openshift/src/ansible/oc_adm_registry.py | 2
-rw-r--r--  roles/lib_openshift/src/ansible/oc_adm_router.py | 2
-rw-r--r--  roles/lib_openshift/src/ansible/oc_atomic_container.py | 8
-rw-r--r--  roles/lib_openshift/src/ansible/oc_secret.py | 1
-rw-r--r--  roles/lib_openshift/src/ansible/oc_service.py | 1
-rw-r--r--  roles/lib_openshift/src/class/oc_adm_registry.py | 2
-rw-r--r--  roles/lib_openshift/src/class/oc_adm_router.py | 2
-rw-r--r--  roles/lib_openshift/src/class/oc_secret.py | 7
-rw-r--r--  roles/lib_openshift/src/class/oc_service.py | 5
-rw-r--r--  roles/lib_openshift/src/doc/secret | 6
-rw-r--r--  roles/lib_openshift/src/doc/service | 7
-rw-r--r--  roles/lib_openshift/src/lib/base.py | 25
-rw-r--r--  roles/lib_openshift/src/lib/secret.py | 5
-rw-r--r--  roles/lib_openshift/src/lib/service.py | 61
-rwxr-xr-x  roles/lib_openshift/src/test/integration/oc_service.yml | 5
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_adm_registry.py | 3
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_adm_router.py | 3
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_secret.py | 3
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_service.py | 182
-rw-r--r--  roles/nuage_master/defaults/main.yaml | 4
-rw-r--r--  roles/nuage_master/tasks/main.yaml | 8
-rw-r--r--  roles/nuage_master/templates/nuage-openshift-monitor.j2 | 10
-rw-r--r--  roles/nuage_node/tasks/main.yaml | 15
-rw-r--r--  roles/nuage_node/templates/vsp-openshift.j2 | 2
-rw-r--r--  roles/nuage_node/vars/main.yaml | 3
-rwxr-xr-x  roles/openshift_examples/examples-sync.sh | 11
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/cfme-templates/jboss-middleware-manager-pv-example.yaml | 58
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/cfme-templates/jboss-middleware-manager-template.yaml | 254
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/cfme-templates/jboss-middleware-manager-pv-example.yaml | 58
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/cfme-templates/jboss-middleware-manager-template.yaml | 254
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/amp.yml | 1261
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/apicast-gateway-template.yml | 149
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/apicast.yml | 157
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql-persistent.json | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql.json | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql-persistent.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb-persistent.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/pvc.yml | 49
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/wildcard.yml | 158
-rw-r--r--  roles/openshift_health_checker/openshift_checks/memory_availability.py | 25
-rw-r--r--  roles/openshift_health_checker/test/memory_availability_test.py | 31
-rw-r--r--  roles/openshift_hosted/tasks/router/router.yml | 4
-rw-r--r--  roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml | 4
-rw-r--r--  roles/openshift_logging/README.md | 31
-rw-r--r--  roles/openshift_logging/defaults/main.yml | 9
-rw-r--r--  roles/openshift_logging/tasks/generate_certs.yaml | 2
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml | 15
-rw-r--r--  roles/openshift_logging_curator/templates/curator.j2 | 5
-rw-r--r--  roles/openshift_logging_mux/defaults/main.yml | 11
-rw-r--r--  roles/openshift_logging_mux/tasks/main.yaml | 22
-rw-r--r--  roles/openshift_metrics/templates/hawkular_cassandra_rc.j2 | 1
-rw-r--r--  roles/openshift_metrics/templates/hawkular_metrics_rc.j2 | 1
-rw-r--r--  roles/openshift_metrics/templates/heapster.j2 | 1
-rw-r--r--  roles/openshift_node_upgrade/tasks/restart.yml | 8
-rw-r--r--  roles/openshift_version/tasks/main.yml | 9
86 files changed, 3415 insertions, 481 deletions
diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml
index 3af3e00b2..f0f5a40dd 100644
--- a/roles/docker/tasks/systemcontainer_docker.yml
+++ b/roles/docker/tasks/systemcontainer_docker.yml
@@ -102,7 +102,7 @@
l_docker_image: "{{ l_docker_image_prepend }}/{{ openshift.docker.service_name }}:latest"
# NOTE: no_proxy added as a workaround until https://github.com/projectatomic/atomic/pull/999 is released
-- name: Pre-pull Container Enginer System Container image
+- name: Pre-pull Container Engine System Container image
command: "atomic pull --storage ostree {{ l_docker_image }}"
changed_when: false
environment:
@@ -119,13 +119,11 @@
path: "{{ docker_conf_dir }}"
state: directory
-- name: Install Container Enginer System Container
+- name: Install Container Engine System Container
oc_atomic_container:
name: "{{ openshift.docker.service_name }}"
image: "{{ l_docker_image }}"
state: latest
- values:
- - "system-package=no"
- name: Configure Container Engine Service File
template:
diff --git a/roles/lib_openshift/library/oc_adm_ca_server_cert.py b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
index 7573c5b85..3974cc4dd 100644
--- a/roles/lib_openshift/library/oc_adm_ca_server_cert.py
+++ b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
@@ -1405,7 +1405,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1419,18 +1418,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
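The to_option_list/stringify change above comes from roles/lib_openshift/src/lib/base.py and is regenerated into every module under roles/lib_openshift/library, so the same hunk repeats throughout this diff. A minimal standalone sketch, with a hypothetical config_options dict and label values, shows what the new ascommalist handling produces:

    # Sketch of the new stringify() behaviour; the option structure and the
    # example label values are illustrative only.
    def stringify(config_options, ascommalist=''):
        rval = []
        for key in sorted(config_options):
            data = config_options[key]
            if data['include'] and (data['value'] or isinstance(data['value'], int)):
                if key == ascommalist:
                    # collapse a dict-valued option into comma-delimited key=value pairs
                    val = ','.join('{}={}'.format(k, v)
                                   for k, v in sorted(data['value'].items()))
                else:
                    val = data['value']
                rval.append('--{}={}'.format(key.replace('_', '-'), val))
        return rval

    opts = {'labels': {'include': True,
                       'value': {'region': 'infra', 'router': 'router'}},
            'replicas': {'include': True, 'value': 2}}
    print(stringify(opts, ascommalist='labels'))
    # ['--labels=region=infra,router=router', '--replicas=2']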
diff --git a/roles/lib_openshift/library/oc_adm_manage_node.py b/roles/lib_openshift/library/oc_adm_manage_node.py
index bb3619081..320eac17e 100644
--- a/roles/lib_openshift/library/oc_adm_manage_node.py
+++ b/roles/lib_openshift/library/oc_adm_manage_node.py
@@ -1391,7 +1391,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1405,18 +1404,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_adm_policy_group.py b/roles/lib_openshift/library/oc_adm_policy_group.py
index 358d4515b..f9658d6e1 100644
--- a/roles/lib_openshift/library/oc_adm_policy_group.py
+++ b/roles/lib_openshift/library/oc_adm_policy_group.py
@@ -1377,7 +1377,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1391,18 +1390,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py
index 5807f41a8..0bdfd0bad 100644
--- a/roles/lib_openshift/library/oc_adm_policy_user.py
+++ b/roles/lib_openshift/library/oc_adm_policy_user.py
@@ -1377,7 +1377,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1391,18 +1390,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_adm_registry.py b/roles/lib_openshift/library/oc_adm_registry.py
index e1b79466e..df0e40d20 100644
--- a/roles/lib_openshift/library/oc_adm_registry.py
+++ b/roles/lib_openshift/library/oc_adm_registry.py
@@ -1495,7 +1495,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1509,18 +1508,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
@@ -1889,10 +1898,12 @@ class SecretConfig(object):
sname,
namespace,
kubeconfig,
- secrets=None):
+ secrets=None,
+ stype=None):
''' constructor for handling secret options '''
self.kubeconfig = kubeconfig
self.name = sname
+ self.type = stype
self.namespace = namespace
self.secrets = secrets
self.data = {}
@@ -1903,6 +1914,7 @@ class SecretConfig(object):
''' assign the correct properties for a secret dict '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'Secret'
+ self.data['type'] = self.type
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
@@ -1995,7 +2007,8 @@ class ServiceConfig(object):
cluster_ip=None,
portal_ip=None,
session_affinity=None,
- service_type=None):
+ service_type=None,
+ external_ips=None):
''' constructor for handling service options '''
self.name = sname
self.namespace = namespace
@@ -2006,6 +2019,7 @@ class ServiceConfig(object):
self.portal_ip = portal_ip
self.session_affinity = session_affinity
self.service_type = service_type
+ self.external_ips = external_ips
self.data = {}
self.create_dict()
@@ -2018,8 +2032,9 @@ class ServiceConfig(object):
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
if self.labels:
- for lab, lab_value in self.labels.items():
- self.data['metadata'][lab] = lab_value
+ self.data['metadata']['labels'] = {}
+ for lab, lab_value in self.labels.items():
+ self.data['metadata']['labels'][lab] = lab_value
self.data['spec'] = {}
if self.ports:
@@ -2041,6 +2056,10 @@ class ServiceConfig(object):
if self.service_type:
self.data['spec']['type'] = self.service_type
+ if self.external_ips:
+ self.data['spec']['externalIPs'] = self.external_ips
+
+
# pylint: disable=too-many-instance-attributes,too-many-public-methods
class Service(Yedit):
''' Class to model the oc service object '''
@@ -2049,6 +2068,7 @@ class Service(Yedit):
cluster_ip = "spec.clusterIP"
selector_path = 'spec.selector'
kind = 'Service'
+ external_ips = "spec.externalIPs"
def __init__(self, content):
'''Service constructor'''
@@ -2110,6 +2130,53 @@ class Service(Yedit):
'''add cluster ip'''
self.put(Service.portal_ip, pip)
+ def get_external_ips(self):
+ ''' get a list of external_ips '''
+ return self.get(Service.external_ips) or []
+
+ def add_external_ips(self, inc_external_ips):
+ ''' add an external_ip to the external_ips list '''
+ if not isinstance(inc_external_ips, list):
+ inc_external_ips = [inc_external_ips]
+
+ external_ips = self.get_external_ips()
+ if not external_ips:
+ self.put(Service.external_ips, inc_external_ips)
+ else:
+ external_ips.extend(inc_external_ips)
+
+ return True
+
+ def find_external_ips(self, inc_external_ip):
+ ''' find a specific external IP '''
+ val = None
+ try:
+ idx = self.get_external_ips().index(inc_external_ip)
+ val = self.get_external_ips()[idx]
+ except ValueError:
+ pass
+
+ return val
+
+ def delete_external_ips(self, inc_external_ips):
+ ''' remove an external IP from a service '''
+ if not isinstance(inc_external_ips, list):
+ inc_external_ips = [inc_external_ips]
+
+ external_ips = self.get(Service.external_ips) or []
+
+ if not external_ips:
+ return True
+
+ removed = False
+ for inc_external_ip in inc_external_ips:
+ external_ip = self.find_external_ips(inc_external_ip)
+ if external_ip:
+ external_ips.remove(external_ip)
+ removed = True
+
+ return removed
+
# -*- -*- -*- End included fragment: lib/service.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/volume.py -*- -*- -*-
@@ -2350,7 +2417,7 @@ class Registry(OpenShiftCLI):
def prepare_registry(self):
''' prepare a registry for instantiation '''
- options = self.config.to_option_list()
+ options = self.config.to_option_list(ascommalist='labels')
cmd = ['registry']
cmd.extend(options)
@@ -2656,7 +2723,7 @@ def main():
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
images=dict(default=None, type='str'),
latest_images=dict(default=False, type='bool'),
- labels=dict(default=None, type='list'),
+ labels=dict(default=None, type='dict'),
ports=dict(default=['5000'], type='list'),
replicas=dict(default=1, type='int'),
selector=dict(default=None, type='str'),
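Besides labels now being parsed as a dict and passed through to_option_list(ascommalist='labels'), the registry's embedded ServiceConfig nests labels under metadata.labels instead of merging them straight into metadata. A small sketch of the generated metadata, using a hypothetical service name and label, contrasts the two behaviours:

    # Hypothetical name and label; only the nesting changes.
    labels = {'router': 'router'}

    # Old behaviour: label keys merged directly into metadata.
    old_metadata = {'name': 'docker-registry', 'namespace': 'default'}
    old_metadata.update(labels)
    # {'name': 'docker-registry', 'namespace': 'default', 'router': 'router'}

    # New behaviour: labels collected under a 'labels' key, as Kubernetes expects.
    new_metadata = {'name': 'docker-registry', 'namespace': 'default', 'labels': {}}
    for lab, lab_value in labels.items():
        new_metadata['labels'][lab] = lab_value
    # {'name': 'docker-registry', 'namespace': 'default',
    #  'labels': {'router': 'router'}}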
diff --git a/roles/lib_openshift/library/oc_adm_router.py b/roles/lib_openshift/library/oc_adm_router.py
index e3b1bbcbc..8af8cb196 100644
--- a/roles/lib_openshift/library/oc_adm_router.py
+++ b/roles/lib_openshift/library/oc_adm_router.py
@@ -1520,7 +1520,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1534,18 +1533,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
@@ -1568,7 +1577,8 @@ class ServiceConfig(object):
cluster_ip=None,
portal_ip=None,
session_affinity=None,
- service_type=None):
+ service_type=None,
+ external_ips=None):
''' constructor for handling service options '''
self.name = sname
self.namespace = namespace
@@ -1579,6 +1589,7 @@ class ServiceConfig(object):
self.portal_ip = portal_ip
self.session_affinity = session_affinity
self.service_type = service_type
+ self.external_ips = external_ips
self.data = {}
self.create_dict()
@@ -1591,8 +1602,9 @@ class ServiceConfig(object):
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
if self.labels:
- for lab, lab_value in self.labels.items():
- self.data['metadata'][lab] = lab_value
+ self.data['metadata']['labels'] = {}
+ for lab, lab_value in self.labels.items():
+ self.data['metadata']['labels'][lab] = lab_value
self.data['spec'] = {}
if self.ports:
@@ -1614,6 +1626,10 @@ class ServiceConfig(object):
if self.service_type:
self.data['spec']['type'] = self.service_type
+ if self.external_ips:
+ self.data['spec']['externalIPs'] = self.external_ips
+
+
# pylint: disable=too-many-instance-attributes,too-many-public-methods
class Service(Yedit):
''' Class to model the oc service object '''
@@ -1622,6 +1638,7 @@ class Service(Yedit):
cluster_ip = "spec.clusterIP"
selector_path = 'spec.selector'
kind = 'Service'
+ external_ips = "spec.externalIPs"
def __init__(self, content):
'''Service constructor'''
@@ -1683,6 +1700,53 @@ class Service(Yedit):
'''add cluster ip'''
self.put(Service.portal_ip, pip)
+ def get_external_ips(self):
+ ''' get a list of external_ips '''
+ return self.get(Service.external_ips) or []
+
+ def add_external_ips(self, inc_external_ips):
+ ''' add an external_ip to the external_ips list '''
+ if not isinstance(inc_external_ips, list):
+ inc_external_ips = [inc_external_ips]
+
+ external_ips = self.get_external_ips()
+ if not external_ips:
+ self.put(Service.external_ips, inc_external_ips)
+ else:
+ external_ips.extend(inc_external_ips)
+
+ return True
+
+ def find_external_ips(self, inc_external_ip):
+ ''' find a specific external IP '''
+ val = None
+ try:
+ idx = self.get_external_ips().index(inc_external_ip)
+ val = self.get_external_ips()[idx]
+ except ValueError:
+ pass
+
+ return val
+
+ def delete_external_ips(self, inc_external_ips):
+ ''' remove an external IP from a service '''
+ if not isinstance(inc_external_ips, list):
+ inc_external_ips = [inc_external_ips]
+
+ external_ips = self.get(Service.external_ips) or []
+
+ if not external_ips:
+ return True
+
+ removed = False
+ for inc_external_ip in inc_external_ips:
+ external_ip = self.find_external_ips(inc_external_ip)
+ if external_ip:
+ external_ips.remove(external_ip)
+ removed = True
+
+ return removed
+
# -*- -*- -*- End included fragment: lib/service.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/deploymentconfig.py -*- -*- -*-
@@ -2178,10 +2242,12 @@ class SecretConfig(object):
sname,
namespace,
kubeconfig,
- secrets=None):
+ secrets=None,
+ stype=None):
''' constructor for handling secret options '''
self.kubeconfig = kubeconfig
self.name = sname
+ self.type = stype
self.namespace = namespace
self.secrets = secrets
self.data = {}
@@ -2192,6 +2258,7 @@ class SecretConfig(object):
''' assign the correct properties for a secret dict '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'Secret'
+ self.data['type'] = self.type
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
@@ -2782,7 +2849,7 @@ class Router(OpenShiftCLI):
# No certificate was passed to us. do not pass one to oc adm router
self.config.config_options['default_cert']['include'] = False
- options = self.config.to_option_list()
+ options = self.config.to_option_list(ascommalist='labels')
cmd = ['router', self.config.name]
cmd.extend(options)
@@ -3083,7 +3150,7 @@ def main():
key_file=dict(default=None, type='str'),
images=dict(default=None, type='str'), #'openshift3/ose-${component}:${version}'
latest_images=dict(default=False, type='bool'),
- labels=dict(default=None, type='list'),
+ labels=dict(default=None, type='dict'),
ports=dict(default=['80:80', '443:443'], type='list'),
replicas=dict(default=1, type='int'),
selector=dict(default=None, type='str'),
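The Service helpers added above (and in src/lib/service.py) treat spec.externalIPs as a plain list: add_external_ips appends, find_external_ips looks an entry up by value, and delete_external_ips removes any matches and reports whether anything was removed. A self-contained toy stand-in for the Yedit-backed Service class, with made-up IPs, shows the intended semantics:

    # Toy stand-in for the Yedit-backed Service class; the IPs are illustrative.
    class ToyService(object):
        def __init__(self, external_ips=None):
            self.external_ips = external_ips or []

        def add_external_ips(self, inc):
            if not isinstance(inc, list):
                inc = [inc]
            self.external_ips.extend(inc)
            return True

        def delete_external_ips(self, inc):
            if not isinstance(inc, list):
                inc = [inc]
            removed = False
            for ip in inc:
                if ip in self.external_ips:
                    self.external_ips.remove(ip)
                    removed = True
            return removed

    svc = ToyService()
    svc.add_external_ips('192.0.2.10')
    svc.add_external_ips(['192.0.2.11', '192.0.2.12'])
    svc.delete_external_ips('192.0.2.11')
    print(svc.external_ips)   # ['192.0.2.10', '192.0.2.12']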
diff --git a/roles/lib_openshift/library/oc_atomic_container.py b/roles/lib_openshift/library/oc_atomic_container.py
index d2620b4cc..1e017a576 100644
--- a/roles/lib_openshift/library/oc_atomic_container.py
+++ b/roles/lib_openshift/library/oc_atomic_container.py
@@ -73,7 +73,9 @@ from ansible.module_utils.basic import AnsibleModule
def _install(module, container, image, values_list):
''' install a container using atomic CLI. values_list is the list of --set arguments.
container is the name given to the container. image is the image to use for the installation. '''
- args = ['atomic', 'install', "--system", '--name=%s' % container] + values_list + [image]
+ # NOTE: system-package=no is hardcoded. This should be changed to an option in the future.
+ args = ['atomic', 'install', '--system', '--system-package=no',
+ '--name=%s' % container] + values_list + [image]
rc, out, err = module.run_command(args, check_rc=False)
if rc != 0:
return rc, out, err, False
@@ -157,7 +159,9 @@ def core(module):
module.fail_json(rc=rc, msg=err)
return
- containers = json.loads(out)
+ # NOTE: "or '[]' is a workaround until atomic containers list --json
+ # provides an empty list when no containers are present.
+ containers = json.loads(out or '[]')
present = len(containers) > 0
old_image = containers[0]["image_name"] if present else None
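Two behaviours are worth noting in the module change above: --system-package=no is now hardcoded into the atomic install invocation (which is why the explicit system-package=no values entry could be dropped from systemcontainer_docker.yml earlier in this diff), and empty stdout from the `atomic containers list --json` call is coerced to an empty JSON list before parsing. A short sketch of that parsing workaround, with hypothetical command output:

    import json

    # Hypothetical outputs from the `atomic containers list --json` call.
    empty_out = ''                 # older atomic versions print nothing at all
    populated_out = '[{"image_name": "registry.example.com/container-engine:latest"}]'

    for out in (empty_out, populated_out):
        containers = json.loads(out or '[]')    # the workaround from the diff
        present = len(containers) > 0
        old_image = containers[0]['image_name'] if present else None
        print(present, old_image)
    # False None
    # True registry.example.com/container-engine:latest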
diff --git a/roles/lib_openshift/library/oc_clusterrole.py b/roles/lib_openshift/library/oc_clusterrole.py
index 9f3e819a3..3ed0d65dc 100644
--- a/roles/lib_openshift/library/oc_clusterrole.py
+++ b/roles/lib_openshift/library/oc_clusterrole.py
@@ -1369,7 +1369,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1383,18 +1382,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_configmap.py b/roles/lib_openshift/library/oc_configmap.py
index 3c0e82a09..5c8ed48d2 100644
--- a/roles/lib_openshift/library/oc_configmap.py
+++ b/roles/lib_openshift/library/oc_configmap.py
@@ -1375,7 +1375,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1389,18 +1388,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py
index 008ce6a12..f3b6d552d 100644
--- a/roles/lib_openshift/library/oc_edit.py
+++ b/roles/lib_openshift/library/oc_edit.py
@@ -1419,7 +1419,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1433,18 +1432,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_env.py b/roles/lib_openshift/library/oc_env.py
index 824ad4cb3..c6421128a 100644
--- a/roles/lib_openshift/library/oc_env.py
+++ b/roles/lib_openshift/library/oc_env.py
@@ -1386,7 +1386,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1400,18 +1399,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_group.py b/roles/lib_openshift/library/oc_group.py
index 7eacac38e..a791c29af 100644
--- a/roles/lib_openshift/library/oc_group.py
+++ b/roles/lib_openshift/library/oc_group.py
@@ -1359,7 +1359,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1373,18 +1372,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_image.py b/roles/lib_openshift/library/oc_image.py
index 266f8fbcf..bbc123ce0 100644
--- a/roles/lib_openshift/library/oc_image.py
+++ b/roles/lib_openshift/library/oc_image.py
@@ -1378,7 +1378,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1392,18 +1391,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_label.py b/roles/lib_openshift/library/oc_label.py
index 756d7db42..cd1afd0d2 100644
--- a/roles/lib_openshift/library/oc_label.py
+++ b/roles/lib_openshift/library/oc_label.py
@@ -1395,7 +1395,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1409,18 +1408,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py
index 88d4ac8ca..215723cc8 100644
--- a/roles/lib_openshift/library/oc_obj.py
+++ b/roles/lib_openshift/library/oc_obj.py
@@ -1398,7 +1398,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1412,18 +1411,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_objectvalidator.py b/roles/lib_openshift/library/oc_objectvalidator.py
index 8e42083ca..358ef5130 100644
--- a/roles/lib_openshift/library/oc_objectvalidator.py
+++ b/roles/lib_openshift/library/oc_objectvalidator.py
@@ -1330,7 +1330,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1344,18 +1343,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py
index 15e9c606d..025b846c6 100644
--- a/roles/lib_openshift/library/oc_process.py
+++ b/roles/lib_openshift/library/oc_process.py
@@ -1387,7 +1387,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1401,18 +1400,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_project.py b/roles/lib_openshift/library/oc_project.py
index b653d9018..05dfddab8 100644
--- a/roles/lib_openshift/library/oc_project.py
+++ b/roles/lib_openshift/library/oc_project.py
@@ -1384,7 +1384,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1398,18 +1397,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_pvc.py b/roles/lib_openshift/library/oc_pvc.py
index bab67d499..d7de4964c 100644
--- a/roles/lib_openshift/library/oc_pvc.py
+++ b/roles/lib_openshift/library/oc_pvc.py
@@ -1379,7 +1379,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1393,18 +1392,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py
index 7831ec8a4..3090b4cad 100644
--- a/roles/lib_openshift/library/oc_route.py
+++ b/roles/lib_openshift/library/oc_route.py
@@ -1429,7 +1429,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1443,18 +1442,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_scale.py b/roles/lib_openshift/library/oc_scale.py
index 133942e55..6a505fb6b 100644
--- a/roles/lib_openshift/library/oc_scale.py
+++ b/roles/lib_openshift/library/oc_scale.py
@@ -1373,7 +1373,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1387,18 +1386,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py
index 8c6877bb2..02257500f 100644
--- a/roles/lib_openshift/library/oc_secret.py
+++ b/roles/lib_openshift/library/oc_secret.py
@@ -108,6 +108,12 @@ options:
required: false
default: None
aliases: []
+ type:
+ description:
+ - The secret type.
+ required: false
+ default: None
+ aliases: []
force:
description:
- Whether or not to force the operation
@@ -1419,7 +1425,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1433,18 +1438,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
@@ -1461,10 +1476,12 @@ class SecretConfig(object):
sname,
namespace,
kubeconfig,
- secrets=None):
+ secrets=None,
+ stype=None):
''' constructor for handling secret options '''
self.kubeconfig = kubeconfig
self.name = sname
+ self.type = stype
self.namespace = namespace
self.secrets = secrets
self.data = {}
@@ -1475,6 +1492,7 @@ class SecretConfig(object):
''' assign the correct properties for a secret dict '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'Secret'
+ self.data['type'] = self.type
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
@@ -1564,12 +1582,14 @@ class OCSecret(OpenShiftCLI):
def __init__(self,
namespace,
secret_name=None,
+ secret_type=None,
decode=False,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
''' Constructor for OpenshiftOC '''
super(OCSecret, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
self.name = secret_name
+ self.type = secret_type
self.decode = decode
def get(self):
@@ -1600,6 +1620,8 @@ class OCSecret(OpenShiftCLI):
secrets = ["%s=%s" % (sfile['name'], sfile['path']) for sfile in files]
cmd = ['secrets', 'new', self.name]
+ if self.type is not None:
+ cmd.append("--type=%s" % (self.type))
cmd.extend(secrets)
results = self.openshift_cmd(cmd)
@@ -1633,6 +1655,8 @@ class OCSecret(OpenShiftCLI):
secrets = ["%s=%s" % (sfile['name'], sfile['path']) for sfile in files]
cmd = ['-ojson', 'secrets', 'new', self.name]
+ if self.type is not None:
+ cmd.extend(["--type=%s" % (self.type)])
cmd.extend(secrets)
return self.openshift_cmd(cmd, output=True)
@@ -1645,6 +1669,7 @@ class OCSecret(OpenShiftCLI):
ocsecret = OCSecret(params['namespace'],
params['name'],
+ params['type'],
params['decode'],
kubeconfig=params['kubeconfig'],
verbose=params['debug'])
@@ -1767,6 +1792,7 @@ def main():
debug=dict(default=False, type='bool'),
namespace=dict(default='default', type='str'),
name=dict(default=None, type='str'),
+ type=dict(default=None, type='str'),
files=dict(default=None, type='list'),
delete_after=dict(default=False, type='bool'),
contents=dict(default=None, type='list'),
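The new type module argument is threaded through OCSecret into the `oc secrets new` command line in both the create and prep paths. A minimal sketch, using a hypothetical secret name, type, and file list, of how the command list is assembled:

    # Hypothetical inputs; mirrors the cmd construction shown in the diff above.
    name = 'etcd-client-tls'
    secret_type = 'kubernetes.io/tls'
    files = [{'name': 'tls.crt', 'path': '/tmp/tls.crt'},
             {'name': 'tls.key', 'path': '/tmp/tls.key'}]

    secrets = ['%s=%s' % (f['name'], f['path']) for f in files]
    cmd = ['secrets', 'new', name]
    if secret_type is not None:
        cmd.append('--type=%s' % secret_type)
    cmd.extend(secrets)
    print(cmd)
    # ['secrets', 'new', 'etcd-client-tls', '--type=kubernetes.io/tls',
    #  'tls.crt=/tmp/tls.crt', 'tls.key=/tmp/tls.key']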
diff --git a/roles/lib_openshift/library/oc_service.py b/roles/lib_openshift/library/oc_service.py
index a482e13c1..308f45488 100644
--- a/roles/lib_openshift/library/oc_service.py
+++ b/roles/lib_openshift/library/oc_service.py
@@ -140,6 +140,13 @@ options:
- LoadBalancer
- ExternalName
aliases: []
+ externalips:
+ description:
+ - A list of the external IPs that are exposed for this service.
+ - https://kubernetes.io/docs/concepts/services-networking/service/#external-ips
+ required: false
+ default: None
+ aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
@@ -1425,7 +1432,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1439,18 +1445,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
@@ -1473,7 +1489,8 @@ class ServiceConfig(object):
cluster_ip=None,
portal_ip=None,
session_affinity=None,
- service_type=None):
+ service_type=None,
+ external_ips=None):
''' constructor for handling service options '''
self.name = sname
self.namespace = namespace
@@ -1484,6 +1501,7 @@ class ServiceConfig(object):
self.portal_ip = portal_ip
self.session_affinity = session_affinity
self.service_type = service_type
+ self.external_ips = external_ips
self.data = {}
self.create_dict()
@@ -1496,8 +1514,9 @@ class ServiceConfig(object):
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
if self.labels:
- for lab, lab_value in self.labels.items():
- self.data['metadata'][lab] = lab_value
+ self.data['metadata']['labels'] = {}
+ for lab, lab_value in self.labels.items():
+ self.data['metadata']['labels'][lab] = lab_value
self.data['spec'] = {}
if self.ports:
@@ -1519,6 +1538,10 @@ class ServiceConfig(object):
if self.service_type:
self.data['spec']['type'] = self.service_type
+ if self.external_ips:
+ self.data['spec']['externalIPs'] = self.external_ips
+
+
# pylint: disable=too-many-instance-attributes,too-many-public-methods
class Service(Yedit):
''' Class to model the oc service object '''
@@ -1527,6 +1550,7 @@ class Service(Yedit):
cluster_ip = "spec.clusterIP"
selector_path = 'spec.selector'
kind = 'Service'
+ external_ips = "spec.externalIPs"
def __init__(self, content):
'''Service constructor'''
@@ -1588,6 +1612,53 @@ class Service(Yedit):
'''add cluster ip'''
self.put(Service.portal_ip, pip)
+ def get_external_ips(self):
+ ''' get a list of external_ips '''
+ return self.get(Service.external_ips) or []
+
+ def add_external_ips(self, inc_external_ips):
+ ''' add an external_ip to the external_ips list '''
+ if not isinstance(inc_external_ips, list):
+ inc_external_ips = [inc_external_ips]
+
+ external_ips = self.get_external_ips()
+ if not external_ips:
+ self.put(Service.external_ips, inc_external_ips)
+ else:
+ external_ips.extend(inc_external_ips)
+
+ return True
+
+ def find_external_ips(self, inc_external_ip):
+ ''' find a specific external IP '''
+ val = None
+ try:
+ idx = self.get_external_ips().index(inc_external_ip)
+ val = self.get_external_ips()[idx]
+ except ValueError:
+ pass
+
+ return val
+
+ def delete_external_ips(self, inc_external_ips):
+ ''' remove an external IP from a service '''
+ if not isinstance(inc_external_ips, list):
+ inc_external_ips = [inc_external_ips]
+
+ external_ips = self.get(Service.external_ips) or []
+
+ if not external_ips:
+ return True
+
+ removed = False
+ for inc_external_ip in inc_external_ips:
+ external_ip = self.find_external_ips(inc_external_ip)
+ if external_ip:
+ external_ips.remove(external_ip)
+ removed = True
+
+ return removed
+
# -*- -*- -*- End included fragment: lib/service.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_service.py -*- -*- -*-
@@ -1610,13 +1681,15 @@ class OCService(OpenShiftCLI):
ports,
session_affinity,
service_type,
+ external_ips,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
''' Constructor for OCVolume '''
super(OCService, self).__init__(namespace, kubeconfig, verbose)
self.namespace = namespace
self.config = ServiceConfig(sname, namespace, ports, selector, labels,
- cluster_ip, portal_ip, session_affinity, service_type)
+ cluster_ip, portal_ip, session_affinity, service_type,
+ external_ips)
self.user_svc = Service(content=self.config.data)
self.svc = None
@@ -1685,6 +1758,7 @@ class OCService(OpenShiftCLI):
params['ports'],
params['session_affinity'],
params['service_type'],
+ params['external_ips'],
params['kubeconfig'],
params['debug'])
@@ -1786,6 +1860,7 @@ def main():
ports=dict(default=None, type='list'),
session_affinity=dict(default='None', type='str'),
service_type=dict(default='ClusterIP', type='str'),
+ external_ips=dict(default=None, type='list'),
),
supports_check_mode=True,
)
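
A minimal sketch, assuming only the fields visible in the ServiceConfig hunks above, of where the new `labels` and `external_ips` parameters end up in the service definition (helper name and sample values are illustrative; the rest of the dict is built elsewhere in create_dict()):

    # Sketch only: the two fields this patch touches on the service dict.
    def apply_service_fields(data, labels=None, external_ips=None):
        data.setdefault('metadata', {})
        data.setdefault('spec', {})
        if labels:
            # labels are now nested under metadata['labels'] rather than
            # written directly onto metadata
            data['metadata']['labels'] = dict(labels)
        if external_ips:
            data['spec']['externalIPs'] = list(external_ips)
        return data

    # apply_service_fields({}, labels={'component': 'some_component', 'infra': 'true'},
    #                      external_ips=['1.2.3.4', '5.6.7.8'])
    # -> {'metadata': {'labels': {...}}, 'spec': {'externalIPs': ['1.2.3.4', '5.6.7.8']}}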
diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py
index 263398e3d..68c1fc51c 100644
--- a/roles/lib_openshift/library/oc_serviceaccount.py
+++ b/roles/lib_openshift/library/oc_serviceaccount.py
@@ -1371,7 +1371,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1385,18 +1384,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_serviceaccount_secret.py b/roles/lib_openshift/library/oc_serviceaccount_secret.py
index cc7fda1b5..ebc5bf8a2 100644
--- a/roles/lib_openshift/library/oc_serviceaccount_secret.py
+++ b/roles/lib_openshift/library/oc_serviceaccount_secret.py
@@ -1371,7 +1371,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1385,18 +1384,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_user.py b/roles/lib_openshift/library/oc_user.py
index 48ac28834..d1a20fddc 100644
--- a/roles/lib_openshift/library/oc_user.py
+++ b/roles/lib_openshift/library/oc_user.py
@@ -1431,7 +1431,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1445,18 +1444,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py
index 21dd5c3c9..548c9d8e0 100644
--- a/roles/lib_openshift/library/oc_version.py
+++ b/roles/lib_openshift/library/oc_version.py
@@ -1343,7 +1343,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1357,18 +1356,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_volume.py b/roles/lib_openshift/library/oc_volume.py
index be0944843..3826cd8e5 100644
--- a/roles/lib_openshift/library/oc_volume.py
+++ b/roles/lib_openshift/library/oc_volume.py
@@ -1420,7 +1420,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1434,18 +1433,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/src/ansible/oc_adm_registry.py b/roles/lib_openshift/src/ansible/oc_adm_registry.py
index c85973c7d..d669a3488 100644
--- a/roles/lib_openshift/src/ansible/oc_adm_registry.py
+++ b/roles/lib_openshift/src/ansible/oc_adm_registry.py
@@ -17,7 +17,7 @@ def main():
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
images=dict(default=None, type='str'),
latest_images=dict(default=False, type='bool'),
- labels=dict(default=None, type='list'),
+ labels=dict(default=None, type='dict'),
ports=dict(default=['5000'], type='list'),
replicas=dict(default=1, type='int'),
selector=dict(default=None, type='str'),
diff --git a/roles/lib_openshift/src/ansible/oc_adm_router.py b/roles/lib_openshift/src/ansible/oc_adm_router.py
index b6f8e90d0..c6563cc2f 100644
--- a/roles/lib_openshift/src/ansible/oc_adm_router.py
+++ b/roles/lib_openshift/src/ansible/oc_adm_router.py
@@ -21,7 +21,7 @@ def main():
key_file=dict(default=None, type='str'),
images=dict(default=None, type='str'), #'openshift3/ose-${component}:${version}'
latest_images=dict(default=False, type='bool'),
- labels=dict(default=None, type='list'),
+ labels=dict(default=None, type='dict'),
ports=dict(default=['80:80', '443:443'], type='list'),
replicas=dict(default=1, type='int'),
selector=dict(default=None, type='str'),
diff --git a/roles/lib_openshift/src/ansible/oc_atomic_container.py b/roles/lib_openshift/src/ansible/oc_atomic_container.py
index 20d75cb63..1a5ab6869 100644
--- a/roles/lib_openshift/src/ansible/oc_atomic_container.py
+++ b/roles/lib_openshift/src/ansible/oc_atomic_container.py
@@ -9,7 +9,9 @@ from ansible.module_utils.basic import AnsibleModule
def _install(module, container, image, values_list):
''' install a container using atomic CLI. values_list is the list of --set arguments.
container is the name given to the container. image is the image to use for the installation. '''
- args = ['atomic', 'install', "--system", '--name=%s' % container] + values_list + [image]
+ # NOTE: system-package=no is hardcoded. This should be changed to an option in the future.
+ args = ['atomic', 'install', '--system', '--system-package=no',
+ '--name=%s' % container] + values_list + [image]
rc, out, err = module.run_command(args, check_rc=False)
if rc != 0:
return rc, out, err, False
@@ -93,7 +95,9 @@ def core(module):
module.fail_json(rc=rc, msg=err)
return
- containers = json.loads(out)
+ # NOTE: the "or '[]'" fallback is a workaround until 'atomic containers list --json'
+ # provides an empty list when no containers are present.
+ containers = json.loads(out or '[]')
present = len(containers) > 0
old_image = containers[0]["image_name"] if present else None
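
A short self-contained sketch (names and sample values are illustrative, not module code) of the two oc_atomic_container behaviour changes shown above: the hardcoded --system-package=no flag and the empty-output fallback:

    import json

    def install_args(container, image, values_list):
        # the patch hardcodes --system-package=no alongside --system; the NOTE
        # in the diff flags this as something to turn into an option later
        return (['atomic', 'install', '--system', '--system-package=no',
                 '--name=%s' % container] + values_list + [image])

    def parse_container_list(out):
        # `atomic containers list --json` may print nothing at all when no
        # containers exist, so fall back to an empty JSON list before decoding
        return json.loads(out or '[]')

    # parse_container_list('')                                  -> []
    # parse_container_list('[{"image_name": "img"}]')[0]['image_name'] -> 'img'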
diff --git a/roles/lib_openshift/src/ansible/oc_secret.py b/roles/lib_openshift/src/ansible/oc_secret.py
index 1337cbbe5..faa7c1772 100644
--- a/roles/lib_openshift/src/ansible/oc_secret.py
+++ b/roles/lib_openshift/src/ansible/oc_secret.py
@@ -15,6 +15,7 @@ def main():
debug=dict(default=False, type='bool'),
namespace=dict(default='default', type='str'),
name=dict(default=None, type='str'),
+ type=dict(default=None, type='str'),
files=dict(default=None, type='list'),
delete_after=dict(default=False, type='bool'),
contents=dict(default=None, type='list'),
diff --git a/roles/lib_openshift/src/ansible/oc_service.py b/roles/lib_openshift/src/ansible/oc_service.py
index 9eb144e9c..b90c08255 100644
--- a/roles/lib_openshift/src/ansible/oc_service.py
+++ b/roles/lib_openshift/src/ansible/oc_service.py
@@ -21,6 +21,7 @@ def main():
ports=dict(default=None, type='list'),
session_affinity=dict(default='None', type='str'),
service_type=dict(default='ClusterIP', type='str'),
+ external_ips=dict(default=None, type='list'),
),
supports_check_mode=True,
)
diff --git a/roles/lib_openshift/src/class/oc_adm_registry.py b/roles/lib_openshift/src/class/oc_adm_registry.py
index 3c130fe28..ad6869bb6 100644
--- a/roles/lib_openshift/src/class/oc_adm_registry.py
+++ b/roles/lib_openshift/src/class/oc_adm_registry.py
@@ -143,7 +143,7 @@ class Registry(OpenShiftCLI):
def prepare_registry(self):
''' prepare a registry for instantiation '''
- options = self.config.to_option_list()
+ options = self.config.to_option_list(ascommalist='labels')
cmd = ['registry']
cmd.extend(options)
diff --git a/roles/lib_openshift/src/class/oc_adm_router.py b/roles/lib_openshift/src/class/oc_adm_router.py
index 1a0b94b80..0d50116d1 100644
--- a/roles/lib_openshift/src/class/oc_adm_router.py
+++ b/roles/lib_openshift/src/class/oc_adm_router.py
@@ -222,7 +222,7 @@ class Router(OpenShiftCLI):
# No certificate was passed to us. do not pass one to oc adm router
self.config.config_options['default_cert']['include'] = False
- options = self.config.to_option_list()
+ options = self.config.to_option_list(ascommalist='labels')
cmd = ['router', self.config.name]
cmd.extend(options)
diff --git a/roles/lib_openshift/src/class/oc_secret.py b/roles/lib_openshift/src/class/oc_secret.py
index deb36a9fa..ee83580df 100644
--- a/roles/lib_openshift/src/class/oc_secret.py
+++ b/roles/lib_openshift/src/class/oc_secret.py
@@ -13,12 +13,14 @@ class OCSecret(OpenShiftCLI):
def __init__(self,
namespace,
secret_name=None,
+ secret_type=None,
decode=False,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
''' Constructor for OpenshiftOC '''
super(OCSecret, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
self.name = secret_name
+ self.type = secret_type
self.decode = decode
def get(self):
@@ -49,6 +51,8 @@ class OCSecret(OpenShiftCLI):
secrets = ["%s=%s" % (sfile['name'], sfile['path']) for sfile in files]
cmd = ['secrets', 'new', self.name]
+ if self.type is not None:
+ cmd.append("--type=%s" % (self.type))
cmd.extend(secrets)
results = self.openshift_cmd(cmd)
@@ -82,6 +86,8 @@ class OCSecret(OpenShiftCLI):
secrets = ["%s=%s" % (sfile['name'], sfile['path']) for sfile in files]
cmd = ['-ojson', 'secrets', 'new', self.name]
+ if self.type is not None:
+ cmd.extend(["--type=%s" % (self.type)])
cmd.extend(secrets)
return self.openshift_cmd(cmd, output=True)
@@ -94,6 +100,7 @@ class OCSecret(OpenShiftCLI):
ocsecret = OCSecret(params['namespace'],
params['name'],
+ params['type'],
params['decode'],
kubeconfig=params['kubeconfig'],
verbose=params['debug'])
diff --git a/roles/lib_openshift/src/class/oc_service.py b/roles/lib_openshift/src/class/oc_service.py
index 20cf23df5..7268a0c88 100644
--- a/roles/lib_openshift/src/class/oc_service.py
+++ b/roles/lib_openshift/src/class/oc_service.py
@@ -19,13 +19,15 @@ class OCService(OpenShiftCLI):
ports,
session_affinity,
service_type,
+ external_ips,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
''' Constructor for OCVolume '''
super(OCService, self).__init__(namespace, kubeconfig, verbose)
self.namespace = namespace
self.config = ServiceConfig(sname, namespace, ports, selector, labels,
- cluster_ip, portal_ip, session_affinity, service_type)
+ cluster_ip, portal_ip, session_affinity, service_type,
+ external_ips)
self.user_svc = Service(content=self.config.data)
self.svc = None
@@ -94,6 +96,7 @@ class OCService(OpenShiftCLI):
params['ports'],
params['session_affinity'],
params['service_type'],
+ params['external_ips'],
params['kubeconfig'],
params['debug'])
diff --git a/roles/lib_openshift/src/doc/secret b/roles/lib_openshift/src/doc/secret
index 5c2bd9bc0..76b147f6f 100644
--- a/roles/lib_openshift/src/doc/secret
+++ b/roles/lib_openshift/src/doc/secret
@@ -57,6 +57,12 @@ options:
required: false
default: None
aliases: []
+ type:
+ description:
+ - The secret type.
+ required: false
+ default: None
+ aliases: []
force:
description:
- Whether or not to force the operation
diff --git a/roles/lib_openshift/src/doc/service b/roles/lib_openshift/src/doc/service
index 418f91dc5..ba9aa0b38 100644
--- a/roles/lib_openshift/src/doc/service
+++ b/roles/lib_openshift/src/doc/service
@@ -89,6 +89,13 @@ options:
- LoadBalancer
- ExternalName
aliases: []
+ external_ips:
+ description:
+ - A list of the external IPs that are exposed for this service.
+ - https://kubernetes.io/docs/concepts/services-networking/service/#external-ips
+ required: false
+ default: None
+ aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
diff --git a/roles/lib_openshift/src/lib/base.py b/roles/lib_openshift/src/lib/base.py
index 70755187e..b3f01008b 100644
--- a/roles/lib_openshift/src/lib/base.py
+++ b/roles/lib_openshift/src/lib/base.py
@@ -581,7 +581,6 @@ class Utils(object):
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -595,18 +594,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
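
Because the same to_option_list/stringify hunk is repeated in every generated library above, here is one hedged sketch of the new ascommalist behaviour for a dict-valued option such as labels (sample values chosen to match the registry and router unit tests further down):

    # Sketch only: the per-option formatting performed inside stringify().
    def stringify_option(key, value, ascommalist=''):
        if key == ascommalist:
            value = ','.join('{}={}'.format(kk, vv) for kk, vv in sorted(value.items()))
        return '--{}={}'.format(key.replace('_', '-'), value)

    # stringify_option('labels', {'router': 'router', 'another-label': 'val'},
    #                  ascommalist='labels')
    # -> '--labels=another-label=val,router=router'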
diff --git a/roles/lib_openshift/src/lib/secret.py b/roles/lib_openshift/src/lib/secret.py
index 75c32e8b1..a1c202442 100644
--- a/roles/lib_openshift/src/lib/secret.py
+++ b/roles/lib_openshift/src/lib/secret.py
@@ -9,10 +9,12 @@ class SecretConfig(object):
sname,
namespace,
kubeconfig,
- secrets=None):
+ secrets=None,
+ stype=None):
''' constructor for handling secret options '''
self.kubeconfig = kubeconfig
self.name = sname
+ self.type = stype
self.namespace = namespace
self.secrets = secrets
self.data = {}
@@ -23,6 +25,7 @@ class SecretConfig(object):
''' assign the correct properties for a secret dict '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'Secret'
+ self.data['type'] = self.type
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
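
A minimal sketch, limited to the fields visible in the SecretConfig hunk above, of the resource dict produced once the new stype argument is passed through (function name is illustrative):

    # Sketch only; mirrors the create_dict() lines visible in the diff.
    def secret_skeleton(name, namespace, stype=None):
        data = {}
        data['apiVersion'] = 'v1'
        data['kind'] = 'Secret'
        data['type'] = stype          # new: carried through from the stype argument
        data['metadata'] = {'name': name, 'namespace': namespace}
        return data

    # secret_skeleton('testsecretname', 'default', stype='Opaque')['type'] -> 'Opaque'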
diff --git a/roles/lib_openshift/src/lib/service.py b/roles/lib_openshift/src/lib/service.py
index eef568779..0e8cc3aa5 100644
--- a/roles/lib_openshift/src/lib/service.py
+++ b/roles/lib_openshift/src/lib/service.py
@@ -15,7 +15,8 @@ class ServiceConfig(object):
cluster_ip=None,
portal_ip=None,
session_affinity=None,
- service_type=None):
+ service_type=None,
+ external_ips=None):
''' constructor for handling service options '''
self.name = sname
self.namespace = namespace
@@ -26,6 +27,7 @@ class ServiceConfig(object):
self.portal_ip = portal_ip
self.session_affinity = session_affinity
self.service_type = service_type
+ self.external_ips = external_ips
self.data = {}
self.create_dict()
@@ -38,8 +40,9 @@ class ServiceConfig(object):
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
if self.labels:
- for lab, lab_value in self.labels.items():
- self.data['metadata'][lab] = lab_value
+ self.data['metadata']['labels'] = {}
+ for lab, lab_value in self.labels.items():
+ self.data['metadata']['labels'][lab] = lab_value
self.data['spec'] = {}
if self.ports:
@@ -61,6 +64,10 @@ class ServiceConfig(object):
if self.service_type:
self.data['spec']['type'] = self.service_type
+ if self.external_ips:
+ self.data['spec']['externalIPs'] = self.external_ips
+
+
# pylint: disable=too-many-instance-attributes,too-many-public-methods
class Service(Yedit):
''' Class to model the oc service object '''
@@ -69,6 +76,7 @@ class Service(Yedit):
cluster_ip = "spec.clusterIP"
selector_path = 'spec.selector'
kind = 'Service'
+ external_ips = "spec.externalIPs"
def __init__(self, content):
'''Service constructor'''
@@ -129,3 +137,50 @@ class Service(Yedit):
def add_portal_ip(self, pip):
'''add cluster ip'''
self.put(Service.portal_ip, pip)
+
+ def get_external_ips(self):
+ ''' get a list of external_ips '''
+ return self.get(Service.external_ips) or []
+
+ def add_external_ips(self, inc_external_ips):
+ ''' add an external_ip to the external_ips list '''
+ if not isinstance(inc_external_ips, list):
+ inc_external_ips = [inc_external_ips]
+
+ external_ips = self.get_external_ips()
+ if not external_ips:
+ self.put(Service.external_ips, inc_external_ips)
+ else:
+ external_ips.extend(inc_external_ips)
+
+ return True
+
+ def find_external_ips(self, inc_external_ip):
+ ''' find a specific external IP '''
+ val = None
+ try:
+ idx = self.get_external_ips().index(inc_external_ip)
+ val = self.get_external_ips()[idx]
+ except ValueError:
+ pass
+
+ return val
+
+ def delete_external_ips(self, inc_external_ips):
+ ''' remove an external IP from a service '''
+ if not isinstance(inc_external_ips, list):
+ inc_external_ips = [inc_external_ips]
+
+ external_ips = self.get(Service.external_ips) or []
+
+ if not external_ips:
+ return True
+
+ removed = False
+ for inc_external_ip in inc_external_ips:
+ external_ip = self.find_external_ips(inc_external_ip)
+ if external_ip:
+ external_ips.remove(external_ip)
+ removed = True
+
+ return removed
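
An approximation of the new Service external-IP helpers above, written against a plain dict instead of the Yedit-backed object, purely to illustrate the intended behaviour:

    # Sketch only: get/add/delete of spec.externalIPs on a plain dict.
    class ServiceSketch(object):
        def __init__(self, content):
            self.content = content

        def get_external_ips(self):
            return self.content.get('spec', {}).get('externalIPs') or []

        def add_external_ips(self, incoming):
            if not isinstance(incoming, list):
                incoming = [incoming]
            ips = self.get_external_ips()
            if not ips:
                self.content.setdefault('spec', {})['externalIPs'] = incoming
            else:
                ips.extend(incoming)
            return True

        def delete_external_ips(self, incoming):
            if not isinstance(incoming, list):
                incoming = [incoming]
            ips = self.get_external_ips()
            if not ips:
                # mirrors the diff: an already-empty list is treated as success
                return True
            removed = False
            for ip in incoming:
                if ip in ips:
                    ips.remove(ip)
                    removed = True
            return removed

    # svc = ServiceSketch({'spec': {}})
    # svc.add_external_ips('1.2.3.4'); svc.get_external_ips() -> ['1.2.3.4']
    # svc.delete_external_ips(['1.2.3.4'])                    -> True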
diff --git a/roles/lib_openshift/src/test/integration/oc_service.yml b/roles/lib_openshift/src/test/integration/oc_service.yml
index 3eb6facef..29535f24a 100755
--- a/roles/lib_openshift/src/test/integration/oc_service.yml
+++ b/roles/lib_openshift/src/test/integration/oc_service.yml
@@ -18,6 +18,9 @@
test-registtry: default
session_affinity: ClientIP
service_type: ClusterIP
+ labels:
+ component: test-registry
+ infra: registry
register: svc_out
- debug: var=svc_out
@@ -25,6 +28,8 @@
that:
- "svc_out.results.results[0]['metadata']['name'] == 'test-registry'"
- svc_out.changed
+ - "svc_out.results.results[0]['metadata']['labels']['component'] == 'test-registry'"
+ - "svc_out.results.results[0]['metadata']['labels']['infra'] == 'registry'"
msg: service create failed.
# Test idempotent create
diff --git a/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py b/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py
index 97cf86170..77787fe87 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py
@@ -218,7 +218,7 @@ class RegistryTest(unittest.TestCase):
'kubeconfig': '/etc/origin/master/admin.kubeconfig',
'images': None,
'latest_images': None,
- 'labels': None,
+ 'labels': {"docker-registry": "default", "another-label": "val"},
'ports': ['5000'],
'replicas': 1,
'selector': 'type=infra',
@@ -255,6 +255,7 @@ class RegistryTest(unittest.TestCase):
mock.call(['oc', 'get', 'dc', 'docker-registry', '-o', 'json', '-n', 'default'], None),
mock.call(['oc', 'get', 'svc', 'docker-registry', '-o', 'json', '-n', 'default'], None),
mock.call(['oc', 'adm', 'registry',
+ "--labels=another-label=val,docker-registry=default",
'--ports=5000', '--replicas=1', '--selector=type=infra',
'--service-account=registry', '--dry-run=True', '-o', 'json', '-n', 'default'], None),
mock.call(['oc', 'create', '-f', mock.ANY, '-n', 'default'], None),
diff --git a/roles/lib_openshift/src/test/unit/test_oc_adm_router.py b/roles/lib_openshift/src/test/unit/test_oc_adm_router.py
index 5481ac623..dcf768e08 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_adm_router.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_adm_router.py
@@ -300,7 +300,7 @@ class RouterTest(unittest.TestCase):
'cert_file': None,
'key_file': None,
'cacert_file': None,
- 'labels': None,
+ 'labels': {"router": "router", "another-label": "val"},
'ports': ['80:80', '443:443'],
'images': None,
'latest_images': None,
@@ -363,6 +363,7 @@ class RouterTest(unittest.TestCase):
mock.call(['oc', 'get', 'secret', 'router-certs', '-o', 'json', '-n', 'default'], None),
mock.call(['oc', 'get', 'clusterrolebinding', 'router-router-role', '-o', 'json', '-n', 'default'], None),
mock.call(['oc', 'adm', 'router', 'router', '--expose-metrics=False', '--external-host-insecure=False',
+ "--labels=another-label=val,router=router",
'--ports=80:80,443:443', '--replicas=2', '--selector=type=infra', '--service-account=router',
'--stats-port=1936', '--dry-run=True', '-o', 'json', '-n', 'default'], None),
mock.call(['oc', 'create', '-f', mock.ANY, '-n', 'default'], None),
diff --git a/roles/lib_openshift/src/test/unit/test_oc_secret.py b/roles/lib_openshift/src/test/unit/test_oc_secret.py
index e31393793..09cc4a374 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_secret.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_secret.py
@@ -38,6 +38,7 @@ class OCSecretTest(unittest.TestCase):
'state': 'present',
'namespace': 'default',
'name': 'testsecretname',
+ 'type': 'Opaque',
'contents': [{
'path': "/tmp/somesecret.json",
'data': "{'one': 1, 'two': 2, 'three': 3}",
@@ -74,7 +75,7 @@ class OCSecretTest(unittest.TestCase):
# Making sure our mock was called as we expected
mock_cmd.assert_has_calls([
mock.call(['oc', 'get', 'secrets', 'testsecretname', '-o', 'json', '-n', 'default'], None),
- mock.call(['oc', 'secrets', 'new', 'testsecretname', mock.ANY, '-n', 'default'], None),
+ mock.call(['oc', 'secrets', 'new', 'testsecretname', '--type=Opaque', mock.ANY, '-n', 'default'], None),
])
mock_write.assert_has_calls([
diff --git a/roles/lib_openshift/src/test/unit/test_oc_service.py b/roles/lib_openshift/src/test/unit/test_oc_service.py
index e74c66665..9c21a262f 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_service.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_service.py
@@ -39,6 +39,7 @@ class OCServiceTest(unittest.TestCase):
'selector': None,
'session_affinity': None,
'service_type': None,
+ 'external_ips': None,
'kubeconfig': '/etc/origin/master/admin.kubeconfig',
'debug': False}
@@ -124,6 +125,7 @@ class OCServiceTest(unittest.TestCase):
'selector': {'router': 'router'},
'session_affinity': 'ClientIP',
'service_type': 'ClusterIP',
+ 'external_ips': None,
'kubeconfig': '/etc/origin/master/admin.kubeconfig',
'debug': False}
@@ -303,3 +305,183 @@ class OCServiceTest(unittest.TestCase):
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @mock.patch('oc_service.Utils.create_tmpfile_copy')
+ @mock.patch('oc_service.OCService._run')
+ def test_create_with_labels(self, mock_cmd, mock_tmpfile_copy):
+ ''' Test creating a service with labels '''
+ params = {'name': 'router',
+ 'namespace': 'default',
+ 'ports': {'name': '9000-tcp',
+ 'port': 9000,
+ 'protocol': 'TCP',
+ 'targetPort': 9000},
+ 'state': 'present',
+ 'labels': {'component': 'some_component', 'infra': 'true'},
+ 'clusterip': None,
+ 'portalip': None,
+ 'selector': {'router': 'router'},
+ 'session_affinity': 'ClientIP',
+ 'service_type': 'ClusterIP',
+ 'external_ips': None,
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'debug': False}
+
+ service = '''{
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "router",
+ "namespace": "default",
+ "selfLink": "/api/v1/namespaces/default/services/router",
+ "uid": "fabd2440-e3d8-11e6-951c-0e3dd518cefa",
+ "resourceVersion": "3206",
+ "creationTimestamp": "2017-01-26T15:06:14Z",
+ "labels": {"component": "some_component", "infra": "true"}
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "80-tcp",
+ "protocol": "TCP",
+ "port": 80,
+ "targetPort": 80
+ },
+ {
+ "name": "443-tcp",
+ "protocol": "TCP",
+ "port": 443,
+ "targetPort": 443
+ },
+ {
+ "name": "1936-tcp",
+ "protocol": "TCP",
+ "port": 1936,
+ "targetPort": 1936
+ },
+ {
+ "name": "5000-tcp",
+ "protocol": "TCP",
+ "port": 5000,
+ "targetPort": 5000
+ }
+ ],
+ "selector": {
+ "router": "router"
+ },
+ "clusterIP": "172.30.129.161",
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ },
+ "status": {
+ "loadBalancer": {}
+ }
+ }'''
+ mock_cmd.side_effect = [
+ (1, '', 'Error from server: services "router" not found'),
+ (1, '', 'Error from server: services "router" not found'),
+ (0, service, ''),
+ (0, service, '')
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ results = OCService.run_ansible(params, False)
+
+ self.assertTrue(results['changed'])
+ self.assertTrue(results['results']['returncode'] == 0)
+ self.assertEqual(results['results']['results'][0]['metadata']['name'], 'router')
+ self.assertEqual(results['results']['results'][0]['metadata']['labels'], {"component": "some_component", "infra": "true"})
+
+ @mock.patch('oc_service.Utils.create_tmpfile_copy')
+ @mock.patch('oc_service.OCService._run')
+ def test_create_with_external_ips(self, mock_cmd, mock_tmpfile_copy):
+ ''' Test creating a service with external IPs '''
+ params = {'name': 'router',
+ 'namespace': 'default',
+ 'ports': {'name': '9000-tcp',
+ 'port': 9000,
+ 'protocol': 'TCP',
+ 'targetPort': 9000},
+ 'state': 'present',
+ 'labels': {'component': 'some_component', 'infra': 'true'},
+ 'clusterip': None,
+ 'portalip': None,
+ 'selector': {'router': 'router'},
+ 'session_affinity': 'ClientIP',
+ 'service_type': 'ClusterIP',
+ 'external_ips': ['1.2.3.4', '5.6.7.8'],
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'debug': False}
+
+ service = '''{
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "router",
+ "namespace": "default",
+ "selfLink": "/api/v1/namespaces/default/services/router",
+ "uid": "fabd2440-e3d8-11e6-951c-0e3dd518cefa",
+ "resourceVersion": "3206",
+ "creationTimestamp": "2017-01-26T15:06:14Z",
+ "labels": {"component": "some_component", "infra": "true"}
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "80-tcp",
+ "protocol": "TCP",
+ "port": 80,
+ "targetPort": 80
+ },
+ {
+ "name": "443-tcp",
+ "protocol": "TCP",
+ "port": 443,
+ "targetPort": 443
+ },
+ {
+ "name": "1936-tcp",
+ "protocol": "TCP",
+ "port": 1936,
+ "targetPort": 1936
+ },
+ {
+ "name": "5000-tcp",
+ "protocol": "TCP",
+ "port": 5000,
+ "targetPort": 5000
+ }
+ ],
+ "selector": {
+ "router": "router"
+ },
+ "clusterIP": "172.30.129.161",
+ "externalIPs": ["1.2.3.4", "5.6.7.8"],
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ },
+ "status": {
+ "loadBalancer": {}
+ }
+ }'''
+ mock_cmd.side_effect = [
+ (1, '', 'Error from server: services "router" not found'),
+ (1, '', 'Error from server: services "router" not found'),
+ (0, service, ''),
+ (0, service, '')
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ results = OCService.run_ansible(params, False)
+
+ self.assertTrue(results['changed'])
+ self.assertTrue(results['results']['returncode'] == 0)
+ self.assertEqual(results['results']['results'][0]['metadata']['name'], 'router')
+ self.assertEqual(results['results']['results'][0]['metadata']['labels'], {"component": "some_component", "infra": "true"})
+ self.assertEqual(results['results']['results'][0]['spec']['externalIPs'], ["1.2.3.4", "5.6.7.8"])
diff --git a/roles/nuage_master/defaults/main.yaml b/roles/nuage_master/defaults/main.yaml
deleted file mode 100644
index c90f4f443..000000000
--- a/roles/nuage_master/defaults/main.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-nuage_master_cspadminpasswd: ""
-nuage_master_adminusername: admin
-nuage_master_adminuserpasswd: admin
diff --git a/roles/nuage_master/tasks/main.yaml b/roles/nuage_master/tasks/main.yaml
index fefd28bbd..4f8adb63e 100644
--- a/roles/nuage_master/tasks/main.yaml
+++ b/roles/nuage_master/tasks/main.yaml
@@ -33,6 +33,14 @@
- include: certificates.yml
+- name: Install Nuage VSD user certificate
+ become: yes
+ copy: src="{{ vsd_user_cert_file }}" dest="{{ cert_output_dir }}/{{ vsd_user_cert_file | basename }}"
+
+- name: Install Nuage VSD user key
+ become: yes
+ copy: src="{{ vsd_user_key_file }}" dest="{{ cert_output_dir }}/{{ vsd_user_key_file | basename }}"
+
- name: Create nuage-openshift-monitor.yaml
become: yes
template: src=nuage-openshift-monitor.j2 dest=/usr/share/nuage-openshift-monitor/nuage-openshift-monitor.yaml owner=root mode=0644
diff --git a/roles/nuage_master/templates/nuage-openshift-monitor.j2 b/roles/nuage_master/templates/nuage-openshift-monitor.j2
index de2a97e37..e077128a4 100644
--- a/roles/nuage_master/templates/nuage-openshift-monitor.j2
+++ b/roles/nuage_master/templates/nuage-openshift-monitor.j2
@@ -15,12 +15,10 @@ vspVersion: {{ vsp_version }}
enterpriseName: {{ enterprise }}
# Name of the domain in which pods will reside
domainName: {{ domain }}
-# CSP admin user's password
-cspAdminPassword: {{ nuage_master_cspadminpasswd }}
-# Enterprise admin user name
-enterpriseAdminUser: {{ nuage_master_adminusername }}
-# Enterprise admin password
-enterpriseAdminPassword: {{ nuage_master_adminuserpasswd }}
+# VSD generated user certificate file location on master node
+userCertificateFile: {{ cert_output_dir }}/{{ vsd_user_cert_file | basename }}
+# VSD generated user key file location on master node
+userKeyFile: {{ cert_output_dir }}/{{ vsd_user_key_file | basename }}
# Location where logs should be saved
log_dir: {{ nuage_mon_rest_server_logdir }}
# Monitor rest server parameters
diff --git a/roles/nuage_node/tasks/main.yaml b/roles/nuage_node/tasks/main.yaml
index d82dd36a4..928f9e2e6 100644
--- a/roles/nuage_node/tasks/main.yaml
+++ b/roles/nuage_node/tasks/main.yaml
@@ -20,6 +20,21 @@
become: yes
yum: name={{ plugin_rpm }} state=present
+- name: Ensure the CNI conf dir exists
+ become: yes
+ file: path="{{ cni_conf_dir }}" state=directory
+
+- name: Ensure the OpenShift CNI bin dir exists
+ become: yes
+ file: path="{{ cni_bin_dir }}" state=directory
+
+- name: Install CNI loopback plugin
+ become: yes
+ copy:
+ src: "{{ k8s_cni_loopback_plugin }}"
+ dest: "{{ cni_bin_dir }}/{{ k8s_cni_loopback_plugin | basename }}"
+ mode: 0755
+
- name: Copy the certificates and keys
become: yes
copy: src="/tmp/{{ item }}" dest="{{ vsp_openshift_dir }}/{{ item }}"
diff --git a/roles/nuage_node/templates/vsp-openshift.j2 b/roles/nuage_node/templates/vsp-openshift.j2
index d3c0a122a..9fab53906 100644
--- a/roles/nuage_node/templates/vsp-openshift.j2
+++ b/roles/nuage_node/templates/vsp-openshift.j2
@@ -8,6 +8,8 @@ CACert: {{ ca_cert }}
enterpriseName: {{ enterprise }}
# Name of the domain in which pods will reside
domainName: {{ domain }}
+# Name of the VSD user in admin group
+vsdUser: {{ vsduser }}
# IP address and port number of master API server
masterApiServer: {{ api_server }}
# REST server URL
diff --git a/roles/nuage_node/vars/main.yaml b/roles/nuage_node/vars/main.yaml
index 7b789152f..4cf68411f 100644
--- a/roles/nuage_node/vars/main.yaml
+++ b/roles/nuage_node/vars/main.yaml
@@ -19,4 +19,7 @@ nuage_plugin_rest_client_crt_dir: "{{ nuage_ca_master_crt_dir }}/{{ ansible_node
nuage_ca_master_plugin_key: "{{ nuage_plugin_rest_client_crt_dir }}/nuageMonClient.key"
nuage_ca_master_plugin_crt: "{{ nuage_plugin_rest_client_crt_dir }}/nuageMonClient.crt"
+cni_conf_dir: "/etc/cni/net.d/"
+cni_bin_dir: "/opt/cni/bin/"
+
nuage_plugin_crt_dir: /usr/share/vsp-openshift
diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh
index c7e51bbfc..f3f270c40 100755
--- a/roles/openshift_examples/examples-sync.sh
+++ b/roles/openshift_examples/examples-sync.sh
@@ -7,8 +7,7 @@
XPAAS_VERSION=ose-v1.3.6
ORIGIN_VERSION=${1:-v3.6}
-RHAMP_TAG=1.0.0.GA
-RHAMP_TEMPLATE=https://raw.githubusercontent.com/3scale/rhamp-openshift-templates/${RHAMP_TAG}/apicast-gateway/apicast-gateway-template.yml
+RHAMP_TAG=2.0.0.GA
EXAMPLES_BASE=$(pwd)/files/examples/${ORIGIN_VERSION}
find ${EXAMPLES_BASE} -name '*.json' -delete
TEMP=`mktemp -d`
@@ -17,9 +16,11 @@ pushd $TEMP
wget https://github.com/openshift/origin/archive/master.zip -O origin-master.zip
wget https://github.com/jboss-fuse/application-templates/archive/GA.zip -O fis-GA.zip
wget https://github.com/jboss-openshift/application-templates/archive/${XPAAS_VERSION}.zip -O application-templates-master.zip
+wget https://github.com/3scale/rhamp-openshift-templates/archive/${RHAMP_TAG}.zip -O amp.zip
unzip origin-master.zip
unzip application-templates-master.zip
unzip fis-GA.zip
+unzip amp.zip
mv origin-master/examples/db-templates/* ${EXAMPLES_BASE}/db-templates/
mv origin-master/examples/quickstarts/* ${EXAMPLES_BASE}/quickstart-templates/
mv origin-master/examples/jenkins/jenkins-*template.json ${EXAMPLES_BASE}/quickstart-templates/
@@ -30,15 +31,11 @@ mv application-templates-${XPAAS_VERSION}/jboss-image-streams.json ${EXAMPLES_BA
mv application-templates-GA/fis-image-streams.json ${EXAMPLES_BASE}/xpaas-streams/fis-image-streams.json
mv application-templates-GA/quickstarts/* ${EXAMPLES_BASE}/xpaas-templates/
find application-templates-${XPAAS_VERSION}/ -name '*.json' ! -wholename '*secret*' ! -wholename '*demo*' -exec mv {} ${EXAMPLES_BASE}/xpaas-templates/ \;
+find 3scale-amp-openshift-templates-${RHAMP_TAG}/ -name '*.yml' -exec mv {} ${EXAMPLES_BASE}/quickstart-templates/ \;
popd
wget https://raw.githubusercontent.com/redhat-developer/s2i-dotnetcore/master/dotnet_imagestreams.json -O ${EXAMPLES_BASE}/image-streams/dotnet_imagestreams.json
wget https://raw.githubusercontent.com/redhat-developer/s2i-dotnetcore/master/templates/dotnet-example.json -O ${EXAMPLES_BASE}/quickstart-templates/dotnet-example.json
wget https://raw.githubusercontent.com/redhat-developer/s2i-dotnetcore/master/templates/dotnet-pgsql-persistent.json -O ${EXAMPLES_BASE}/quickstart-templates/dotnet-pgsql-persistent.json
-wget ${RHAMP_TEMPLATE} -O ${EXAMPLES_BASE}/quickstart-templates/apicast-gateway-template.yml
-wget https://raw.githubusercontent.com/openshift/origin-metrics/master/metrics.yaml -O ../openshift_hosted_templates/files/${ORIGIN_VERSION}/origin/metrics-deployer.yaml
-wget https://raw.githubusercontent.com/openshift/origin-metrics/enterprise/metrics.yaml -O ../openshift_hosted_templates/files/${ORIGIN_VERSION}/enterprise/metrics-deployer.yaml
-wget https://raw.githubusercontent.com/openshift/origin-aggregated-logging/master/deployer/deployer.yaml -O ../openshift_hosted_templates/files/${ORIGIN_VERSION}/origin/logging-deployer.yaml
-wget https://raw.githubusercontent.com/openshift/origin-aggregated-logging/enterprise/deployment/deployer.yaml -O ../openshift_hosted_templates/files/${ORIGIN_VERSION}/enterprise/logging-deployer.yaml
git diff files/examples
diff --git a/roles/openshift_examples/files/examples/v1.4/cfme-templates/jboss-middleware-manager-pv-example.yaml b/roles/openshift_examples/files/examples/v1.4/cfme-templates/jboss-middleware-manager-pv-example.yaml
new file mode 100644
index 000000000..240f6cbdf
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/cfme-templates/jboss-middleware-manager-pv-example.yaml
@@ -0,0 +1,58 @@
+#
+# Copyright 2016-2017 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: v1
+kind: Template
+parameters:
+- name: HAWKULAR_SERVICES_DATA_LIMIT
+ description: Maximum amount of data used by the hawkular-services container (mostly logging)
+ displayName: Hawkular Services Container Data Limit
+ value: 1Gi
+- name: CASSANDRA_DATA_LIMIT
+ description: Maximum amount of data used by the Cassandra container
+ displayName: Cassandra Container Data Limit
+ value: 2Gi
+
+objects:
+- apiVersion: v1
+ kind: PersistentVolume
+ metadata:
+ name: h-services-pv
+ labels:
+ type: h-services
+ spec:
+ capacity:
+ storage: ${HAWKULAR_SERVICES_DATA_LIMIT}
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Retain
+ hostPath:
+ path: /tmp/pv-services
+- apiVersion: v1
+ kind: PersistentVolume
+ metadata:
+ name: cassandra-pv
+ labels:
+ type: cassandra
+ spec:
+ capacity:
+ storage: ${CASSANDRA_DATA_LIMIT}
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Retain
+ hostPath:
+ path: /tmp/pv-cassandra
diff --git a/roles/openshift_examples/files/examples/v1.4/cfme-templates/jboss-middleware-manager-template.yaml b/roles/openshift_examples/files/examples/v1.4/cfme-templates/jboss-middleware-manager-template.yaml
new file mode 100644
index 000000000..fef86ff5a
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/cfme-templates/jboss-middleware-manager-template.yaml
@@ -0,0 +1,254 @@
+#
+# Copyright 2016-2017 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: v1
+kind: Template
+metadata:
+ name: hawkular-services
+ annotations:
+ openshift.io/display-name: Hawkular Services
+ description: Hawkular-Services all-in-one (including Hawkular Metrics, Hawkular Alerts and Hawkular Inventory).
+ iconClass: icon-wildfly
+ tags: hawkular,hawkular-services,metrics,alerts,manageiq,cassandra
+
+parameters:
+- name: HAWKULAR_SERVICES_IMAGE
+ description: What Docker image should be used for hawkular-services.
+ displayName: Hawkular Services Docker Image
+ value: registry.access.redhat.com/jboss-mm-7-tech-preview/middleware-manager:latest
+- name: CASSANDRA_IMAGE
+ description: What Docker image should be used for the Cassandra node.
+ displayName: Cassandra Docker Image
+ value: registry.access.redhat.com/openshift3/metrics-cassandra:3.4.0
+- name: CASSANDRA_MEMORY_LIMIT
+ description: Maximum amount of memory for Cassandra container.
+ displayName: Cassandra Memory Limit
+ value: 2Gi
+- name: CASSANDRA_DATA_LIMIT
+ description: Maximum amount of data used by the Cassandra container.
+ displayName: Cassandra Container Data Limit
+ value: 2Gi
+- name: HAWKULAR_SERVICES_DATA_LIMIT
+ description: Maximum amount of data used by the hawkular-services container (mostly logging).
+ displayName: Hawkular Services Container Data Limit
+ value: 1Gi
+- name: ROUTE_NAME
+ description: Public route with this name will be created.
+ displayName: Route Name
+ value: hawkular-services
+- name: ROUTE_HOSTNAME
+ description: The Hawkular Services will be accessible under this hostname; if left blank, a value will be defaulted.
+ displayName: Hostname
+- name: HAWKULAR_USER
+ description: Username used for accessing the Hawkular Services; if left blank, a value will be generated.
+ displayName: Hawkular User
+ from: '[a-zA-Z0-9]{16}'
+ generate: expression
+- name: HAWKULAR_PASSWORD
+ description: Password used for accessing the Hawkular Services; if left blank, a value will be generated.
+ displayName: Hawkular Password
+ from: '[a-zA-Z0-9]{16}'
+ generate: expression
+labels:
+ template: hawkular-services
+message: Credentials for hawkular-services are ${HAWKULAR_USER}:${HAWKULAR_PASSWORD}
+
+objects:
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Exposes and load balances the application pods
+ service.alpha.openshift.io/dependencies: '[{"name":"hawkular-cassandra","namespace":"","kind":"Service"}]'
+ name: hawkular-services
+ spec:
+ ports:
+ - name: http-8080-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: admin-9990-tcp
+ port: 9990
+ protocol: TCP
+ targetPort: 9990
+ selector:
+ name: hawkular-services
+ type: ClusterIP
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Cassandra Service
+ name: hawkular-cassandra
+ spec:
+ ports:
+ - name: cql-9042-tcp
+ port: 9042
+ protocol: TCP
+ targetPort: 9042
+ selector:
+ name: hawkular-cassandra
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: ${ROUTE_NAME}
+ spec:
+ host: ${ROUTE_HOSTNAME}
+ to:
+ kind: Service
+ name: hawkular-services
+ port:
+ targetPort: http-8080-tcp
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ annotations:
+ description: Defines how to deploy the application server
+ name: hawkular-services
+ spec:
+ replicas: 1
+ selector:
+ name: hawkular-services
+ strategy:
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: hawkular-services
+ spec:
+ containers:
+ - image: ${HAWKULAR_SERVICES_IMAGE}
+ env:
+ - name: HAWKULAR_BACKEND
+ value: remote
+ - name: CASSANDRA_NODES
+ value: hawkular-cassandra
+ - name: HAWKULAR_USER
+ value: ${HAWKULAR_USER}
+ - name: HAWKULAR_PASSWORD
+ value: ${HAWKULAR_PASSWORD}
+ imagePullPolicy: IfNotPresent
+ name: hawkular-services
+ volumeMounts:
+ - name: h-services-data
+ mountPath: /var/opt/hawkular
+ ports:
+ - containerPort: 8080
+ - containerPort: 9990
+ livenessProbe:
+ exec:
+ command:
+ - /opt/hawkular/bin/ready.sh
+ initialDelaySeconds: 180
+ timeoutSeconds: 3
+ readinessProbe:
+ exec:
+ command:
+ - /opt/hawkular/bin/ready.sh
+ initialDelaySeconds: 120
+ timeoutSeconds: 3
+ periodSeconds: 5
+ successThreshold: 1
+ failureThreshold: 12
+ resources:
+ requests:
+ memory: 1024Mi
+ cpu: 2000m
+ dnsPolicy: ClusterFirst
+ restartPolicy: Always
+ volumes:
+ - name: h-services-data
+ persistentVolumeClaim:
+ claimName: h-services-pvc
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ annotations:
+ description: Defines how to deploy Cassandra
+ name: hawkular-cassandra
+ spec:
+ replicas: 1
+ selector:
+ name: hawkular-cassandra
+ strategy:
+ type: Recreate
+ rollingParams:
+ timeoutSeconds: 300
+ template:
+ metadata:
+ labels:
+ name: hawkular-cassandra
+ spec:
+ containers:
+ - image: ${CASSANDRA_IMAGE}
+ imagePullPolicy: Always
+ name: hawkular-cassandra
+ env:
+ - name: DATA_VOLUME
+ value: /var/lib/cassandra
+ volumeMounts:
+ - name: cassandra-data
+ mountPath: /var/lib/cassandra
+ ports:
+ - containerPort: 9042
+ - containerPort: 9160
+ readinessProbe:
+ exec:
+ command: ['nodetool', 'status']
+ initialDelaySeconds: 30
+ timeoutSeconds: 10
+ periodSeconds: 15
+ successThreshold: 1
+ failureThreshold: 3
+ livenessProbe:
+ exec:
+ command: ['nodetool', 'status']
+ initialDelaySeconds: 300
+ timeoutSeconds: 10
+ periodSeconds: 15
+ successThreshold: 1
+ failureThreshold: 3
+ resources:
+ limits:
+ memory: ${CASSANDRA_MEMORY_LIMIT}
+ volumes:
+ - name: cassandra-data
+ persistentVolumeClaim:
+ claimName: cassandra-pvc
+
+- apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: h-services-pvc
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 1Gi
+- apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: cassandra-pvc
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 1Gi
diff --git a/roles/openshift_examples/files/examples/v1.5/cfme-templates/jboss-middleware-manager-pv-example.yaml b/roles/openshift_examples/files/examples/v1.5/cfme-templates/jboss-middleware-manager-pv-example.yaml
new file mode 100644
index 000000000..240f6cbdf
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/cfme-templates/jboss-middleware-manager-pv-example.yaml
@@ -0,0 +1,58 @@
+#
+# Copyright 2016-2017 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: v1
+kind: Template
+parameters:
+- name: HAWKULAR_SERVICES_DATA_LIMIT
+ description: Maximum amount of data used by the hawkular-services container (mostly logging)
+ displayName: Hawkular Services Container Data Limit
+ value: 1Gi
+- name: CASSANDRA_DATA_LIMIT
+ description: Maximum amount of data used by the Cassandra container
+ displayName: Cassandra Container Data Limit
+ value: 2Gi
+
+objects:
+- apiVersion: v1
+ kind: PersistentVolume
+ metadata:
+ name: h-services-pv
+ labels:
+ type: h-services
+ spec:
+ capacity:
+ storage: ${HAWKULAR_SERVICES_DATA_LIMIT}
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Retain
+ hostPath:
+ path: /tmp/pv-services
+- apiVersion: v1
+ kind: PersistentVolume
+ metadata:
+ name: cassandra-pv
+ labels:
+ type: cassandra
+ spec:
+ capacity:
+ storage: ${CASSANDRA_DATA_LIMIT}
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Retain
+ hostPath:
+ path: /tmp/pv-cassandra
diff --git a/roles/openshift_examples/files/examples/v1.5/cfme-templates/jboss-middleware-manager-template.yaml b/roles/openshift_examples/files/examples/v1.5/cfme-templates/jboss-middleware-manager-template.yaml
new file mode 100644
index 000000000..bbc0c7044
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/cfme-templates/jboss-middleware-manager-template.yaml
@@ -0,0 +1,254 @@
+#
+# Copyright 2016-2017 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: v1
+kind: Template
+metadata:
+ name: hawkular-services
+ annotations:
+ openshift.io/display-name: Hawkular Services
+ description: Hawkular-Services all-in-one (including Hawkular Metrics, Hawkular Alerts and Hawkular Inventory).
+ iconClass: icon-wildfly
+ tags: hawkular,hawkular-services,metrics,alerts,manageiq,cassandra
+
+parameters:
+- name: HAWKULAR_SERVICES_IMAGE
+ description: What Docker image should be used for hawkular-services.
+ displayName: Hawkular Services Docker Image
+ value: registry.access.redhat.com/jboss-mm-7-tech-preview/middleware-manager:latest
+- name: CASSANDRA_IMAGE
+ description: What Docker image should be used for the Cassandra node.
+ displayName: Cassandra Docker Image
+ value: registry.access.redhat.com/openshift3/metrics-cassandra:3.5.0
+- name: CASSANDRA_MEMORY_LIMIT
+ description: Maximum amount of memory for Cassandra container.
+ displayName: Cassandra Memory Limit
+ value: 2Gi
+- name: CASSANDRA_DATA_LIMIT
+ description: Maximum amount of data used by the Cassandra container.
+ displayName: Cassandra Container Data Limit
+ value: 2Gi
+- name: HAWKULAR_SERVICES_DATA_LIMIT
+ description: Maximum amount of data used by the hawkular-services container (mostly logging).
+ displayName: Hawkular Services Container Data Limit
+ value: 1Gi
+- name: ROUTE_NAME
+ description: Public route with this name will be created.
+ displayName: Route Name
+ value: hawkular-services
+- name: ROUTE_HOSTNAME
+ description: Under this hostname the Hawkular Services will be accessible, if left blank a value will be defaulted.
+ displayName: Hostname
+- name: HAWKULAR_USER
+ description: Username that is used for accessing the Hawkular Services, if left blank a value will be generated.
+ displayName: Hawkular User
+ from: '[a-zA-Z0-9]{16}'
+ generate: expression
+- name: HAWKULAR_PASSWORD
+ description: Password used for accessing the Hawkular Services; if left blank, a value will be generated.
+ displayName: Hawkular Password
+ from: '[a-zA-Z0-9]{16}'
+ generate: expression
+labels:
+ template: hawkular-services
+message: Credentials for hawkular-services are ${HAWKULAR_USER}:${HAWKULAR_PASSWORD}
+
+objects:
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Exposes and load balances the application pods
+ service.alpha.openshift.io/dependencies: '[{"name":"hawkular-cassandra","namespace":"","kind":"Service"}]'
+ name: hawkular-services
+ spec:
+ ports:
+ - name: http-8080-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: admin-9990-tcp
+ port: 9990
+ protocol: TCP
+ targetPort: 9990
+ selector:
+ name: hawkular-services
+ type: ClusterIP
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Cassandra Service
+ name: hawkular-cassandra
+ spec:
+ ports:
+ - name: cql-9042-tcp
+ port: 9042
+ protocol: TCP
+ targetPort: 9042
+ selector:
+ name: hawkular-cassandra
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: ${ROUTE_NAME}
+ spec:
+ host: ${ROUTE_HOSTNAME}
+ to:
+ kind: Service
+ name: hawkular-services
+ port:
+ targetPort: http-8080-tcp
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ annotations:
+ description: Defines how to deploy the application server
+ name: hawkular-services
+ spec:
+ replicas: 1
+ selector:
+ name: hawkular-services
+ strategy:
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: hawkular-services
+ spec:
+ containers:
+ - image: ${HAWKULAR_SERVICES_IMAGE}
+ env:
+ - name: HAWKULAR_BACKEND
+ value: remote
+ - name: CASSANDRA_NODES
+ value: hawkular-cassandra
+ - name: HAWKULAR_USER
+ value: ${HAWKULAR_USER}
+ - name: HAWKULAR_PASSWORD
+ value: ${HAWKULAR_PASSWORD}
+ imagePullPolicy: IfNotPresent
+ name: hawkular-services
+ volumeMounts:
+ - name: h-services-data
+ mountPath: /var/opt/hawkular
+ ports:
+ - containerPort: 8080
+ - containerPort: 9990
+ livenessProbe:
+ exec:
+ command:
+ - /opt/hawkular/bin/ready.sh
+ initialDelaySeconds: 180
+ timeoutSeconds: 3
+ readinessProbe:
+ exec:
+ command:
+ - /opt/hawkular/bin/ready.sh
+ initialDelaySeconds: 120
+ timeoutSeconds: 3
+ periodSeconds: 5
+ successThreshold: 1
+ failureThreshold: 12
+ resources:
+ requests:
+ memory: 1024Mi
+ cpu: 2000m
+ dnsPolicy: ClusterFirst
+ restartPolicy: Always
+ volumes:
+ - name: h-services-data
+ persistentVolumeClaim:
+ claimName: h-services-pvc
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ annotations:
+ description: Defines how to deploy the Cassandra node
+ name: hawkular-cassandra
+ spec:
+ replicas: 1
+ selector:
+ name: hawkular-cassandra
+ strategy:
+ type: Recreate
+ rollingParams:
+ timeoutSeconds: 300
+ template:
+ metadata:
+ labels:
+ name: hawkular-cassandra
+ spec:
+ containers:
+ - image: ${CASSANDRA_IMAGE}
+ imagePullPolicy: Always
+ name: hawkular-cassandra
+ env:
+ - name: DATA_VOLUME
+ value: /var/lib/cassandra
+ volumeMounts:
+ - name: cassandra-data
+ mountPath: /var/lib/cassandra
+ ports:
+ - containerPort: 9042
+ - containerPort: 9160
+ readinessProbe:
+ exec:
+ command: ['nodetool', 'status']
+ initialDelaySeconds: 30
+ timeoutSeconds: 10
+ periodSeconds: 15
+ successThreshold: 1
+ failureThreshold: 3
+ livenessProbe:
+ exec:
+ command: ['nodetool', 'status']
+ initialDelaySeconds: 300
+ timeoutSeconds: 10
+ periodSeconds: 15
+ successThreshold: 1
+ failureThreshold: 3
+ resources:
+ limits:
+ memory: ${CASSANDRA_MEMORY_LIMIT}
+ volumes:
+ - name: cassandra-data
+ persistentVolumeClaim:
+ claimName: cassandra-pvc
+
+- apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: h-services-pvc
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 1Gi
+- apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: cassandra-pvc
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 1Gi
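
For reference, a template like the one above is instantiated with `oc process`. The snippet below is an illustrative sketch only, not part of this patch: the host group and the credential values passed for HAWKULAR_USER and HAWKULAR_PASSWORD are placeholders, and it assumes the template file is present on the target host.

# Illustrative sketch only -- placeholder credentials, not part of this patch.
- hosts: masters
  tasks:
    - name: Instantiate the hawkular-services template (sketch)
      shell: >
        oc process -f jboss-middleware-manager-template.yaml
        -p HAWKULAR_USER=admin -p HAWKULAR_PASSWORD=changeme
        | oc create -f -
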
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/amp.yml b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/amp.yml
new file mode 100644
index 000000000..4e469f6e8
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/amp.yml
@@ -0,0 +1,1261 @@
+base_env: &base_env
+- name: RAILS_ENV
+ value: "production"
+- name: DATABASE_URL
+ value: "mysql2://root:${MYSQL_ROOT_PASSWORD}@system-mysql/${MYSQL_DATABASE}"
+- name: FORCE_SSL
+ value: "true"
+- name: THREESCALE_SUPERDOMAIN
+ value: "${WILDCARD_DOMAIN}"
+- name: TENANT_NAME
+ value: "${TENANT_NAME}"
+- name: APICAST_ACCESS_TOKEN
+ value: "${APICAST_ACCESS_TOKEN}"
+- name: ADMIN_ACCESS_TOKEN
+ value: "${ADMIN_ACCESS_TOKEN}"
+- name: PROVIDER_PLAN
+ value: 'enterprise'
+- name: USER_LOGIN
+ value: "${ADMIN_USERNAME}"
+- name: USER_PASSWORD
+ value: "${ADMIN_PASSWORD}"
+- name: RAILS_LOG_TO_STDOUT
+ value: "true"
+- name: RAILS_LOG_LEVEL
+ value: "info"
+- name: THINKING_SPHINX_ADDRESS
+ value: "system-sphinx"
+- name: THINKING_SPHINX_PORT
+ value: "9306"
+- name: THINKING_SPHINX_CONFIGURATION_FILE
+ value: "/tmp/sphinx.conf"
+- name: EVENTS_SHARED_SECRET
+ value: "${SYSTEM_BACKEND_SHARED_SECRET}"
+- name: THREESCALE_SANDBOX_PROXY_OPENSSL_VERIFY_MODE
+ value: "VERIFY_NONE"
+- name: APICAST_BACKEND_ROOT_ENDPOINT
+ value: "https://backend-${TENANT_NAME}.${WILDCARD_DOMAIN}"
+- name: CONFIG_INTERNAL_API_USER
+ value: "${SYSTEM_BACKEND_USERNAME}"
+- name: CONFIG_INTERNAL_API_PASSWORD
+ value: "${SYSTEM_BACKEND_PASSWORD}"
+- name: SECRET_KEY_BASE
+ value: "${SYSTEM_APP_SECRET_KEY_BASE}"
+- name: AMP_RELEASE
+ value: "${AMP_RELEASE}"
+- name: SMTP_ADDRESS
+ valueFrom:
+ configMapKeyRef:
+ name: smtp
+ key: address
+- name: SMTP_USER_NAME
+ valueFrom:
+ configMapKeyRef:
+ name: smtp
+ key: username
+- name: SMTP_PASSWORD
+ valueFrom:
+ configMapKeyRef:
+ name: smtp
+ key: password
+- name: SMTP_DOMAIN
+ valueFrom:
+ configMapKeyRef:
+ name: smtp
+ key: domain
+- name: SMTP_PORT
+ valueFrom:
+ configMapKeyRef:
+ name: smtp
+ key: port
+- name: SMTP_AUTHENTICATION
+ valueFrom:
+ configMapKeyRef:
+ name: smtp
+ key: authentication
+- name: SMTP_OPENSSL_VERIFY_MODE
+ valueFrom:
+ configMapKeyRef:
+ name: smtp
+ key: openssl.verify.mode
+- name: BACKEND_ROUTE
+ value: "https://backend-${TENANT_NAME}.${WILDCARD_DOMAIN}"
+
+apiVersion: v1
+kind: Template
+metadata:
+ name: "system"
+message: "Login on https://${TENANT_NAME}-admin.${WILDCARD_DOMAIN} as ${ADMIN_USERNAME}/${ADMIN_PASSWORD}"
+objects:
+
+- apiVersion: "v1"
+ kind: "PersistentVolumeClaim"
+ metadata:
+ name: "system-storage"
+ spec:
+ accessModes:
+ - "ReadWriteMany"
+ resources:
+ requests:
+ storage: "100Mi"
+
+- apiVersion: "v1"
+ kind: "PersistentVolumeClaim"
+ metadata:
+ name: "mysql-storage"
+ spec:
+ accessModes:
+ - "ReadWriteOnce"
+ resources:
+ requests:
+ storage: "1Gi"
+
+- apiVersion: "v1"
+ kind: "PersistentVolumeClaim"
+ metadata:
+ name: "system-redis-storage"
+ spec:
+ accessModes:
+ - "ReadWriteOnce"
+ resources:
+ requests:
+ storage: "1Gi"
+
+- apiVersion: "v1"
+ kind: "PersistentVolumeClaim"
+ metadata:
+ name: "backend-redis-storage"
+ spec:
+ accessModes:
+ - "ReadWriteOnce"
+ resources:
+ requests:
+ storage: "1Gi"
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: backend-cron
+ spec:
+ replicas: 1
+ selector:
+ name: backend-cron
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: backend-cron
+ spec:
+ containers:
+ - args:
+ - backend-cron
+ env:
+ - name: CONFIG_REDIS_PROXY
+ value: "backend-redis:6379"
+ - name: CONFIG_QUEUES_MASTER_NAME
+ value: "backend-redis:6379/1"
+ - name: RACK_ENV
+ value: "production"
+ image: 3scale-amp20/backend:1.0-2
+ imagePullPolicy: IfNotPresent
+ name: backend-cron
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: backend-redis
+ spec:
+ replicas: 1
+ selector:
+ name: backend-redis
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ labels:
+ name: backend-redis
+ spec:
+ containers:
+ - image: ${REDIS_IMAGE}
+ imagePullPolicy: IfNotPresent
+ name: backend-redis
+ readinessProbe:
+ exec:
+ command:
+ - "container-entrypoint"
+ - "bash"
+ - "-c"
+ - "redis-cli set liveness-probe \"`date`\" | grep OK"
+ initialDelaySeconds: 10
+ periodSeconds: 30
+ timeoutSeconds: 1
+ livenessProbe:
+ tcpSocket:
+ port: 6379
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ volumeMounts:
+ - name: backend-redis-storage
+ mountPath: "/var/lib/redis/data"
+ - name: redis-config
+ mountPath: /etc/redis.conf
+ subPath: redis.conf
+ volumes:
+ - name: backend-redis-storage
+ persistentVolumeClaim:
+ claimName: backend-redis-storage
+ - name: redis-config
+ configMap:
+ name: redis-config
+ items:
+ - key: redis.conf
+ path: redis.conf
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: backend-listener
+ spec:
+ replicas: 1
+ selector:
+ name: backend-listener
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: backend-listener
+ spec:
+ containers:
+ - args:
+ - 3scale_backend
+ - start
+ - "-e"
+ - production
+ - "-p"
+ - '3000'
+ - "-x"
+ - "/dev/stdout"
+ env:
+ - name: CONFIG_REDIS_PROXY
+ value: "backend-redis:6379"
+ - name: CONFIG_QUEUES_MASTER_NAME
+ value: "backend-redis:6379/1"
+ - name: RACK_ENV
+ value: "production"
+ - name: CONFIG_INTERNAL_API_USER
+ value: "${SYSTEM_BACKEND_USERNAME}"
+ - name: CONFIG_INTERNAL_API_PASSWORD
+ value: "${SYSTEM_BACKEND_PASSWORD}"
+ image: 3scale-amp20/backend:1.0-2
+ imagePullPolicy: IfNotPresent
+ name: backend-listener
+ livenessProbe:
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ tcpSocket:
+ port: 3000
+ readinessProbe:
+ httpGet:
+ path: "/status"
+ port: 3000
+ initialDelaySeconds: 30
+ timeoutSeconds: 5
+ ports:
+ - containerPort: 3000
+ protocol: TCP
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: backend-redis
+ spec:
+ ports:
+ - port: 6379
+ protocol: TCP
+ targetPort: 6379
+ selector:
+ name: backend-redis
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: backend-listener
+ spec:
+ ports:
+ - port: 3000
+ protocol: TCP
+ targetPort: 3000
+ name: http
+ selector:
+ name: backend-listener
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: system-provider
+ spec:
+ ports:
+ - port: 3000
+ protocol: TCP
+ targetPort: provider
+ name: http
+ selector:
+ name: system-app
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: system-developer
+ spec:
+ ports:
+ - port: 3000
+ protocol: TCP
+ targetPort: developer
+ name: http
+ selector:
+ name: system-app
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: backend-worker
+ spec:
+ replicas: 1
+ selector:
+ name: backend-worker
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: backend-worker
+ spec:
+ containers:
+ - args:
+ - 3scale_backend_worker
+ - run
+ env:
+ - name: CONFIG_REDIS_PROXY
+ value: "backend-redis:6379"
+ - name: CONFIG_QUEUES_MASTER_NAME
+ value: "backend-redis:6379/1"
+ - name: RACK_ENV
+ value: "production"
+ - name: CONFIG_EVENTS_HOOK
+ value: http://system-provider:3000/master/events/import
+ - name: CONFIG_EVENTS_HOOK_SHARED_SECRET
+ value: ${SYSTEM_BACKEND_SHARED_SECRET}
+ image: 3scale-amp20/backend:1.0-2
+ imagePullPolicy: IfNotPresent
+ name: backend-worker
+ triggers:
+ - type: ConfigChange
+
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: 'system-mysql'
+ spec:
+ ports:
+ - name: system-mysql
+ protocol: TCP
+ port: 3306
+ targetPort: 3306
+ nodePort: 0
+ selector:
+ name: 'system-mysql'
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: system-redis
+ spec:
+ ports:
+ - port: 6379
+ protocol: TCP
+ targetPort: 6379
+ name: redis
+ selector:
+ name: system-redis
+
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: system-redis
+ spec:
+ replicas: 1
+ selector:
+ name: system-redis
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ labels:
+ name: system-redis
+ spec:
+ containers:
+ - args:
+ image: ${REDIS_IMAGE}
+ imagePullPolicy: IfNotPresent
+ name: system-redis
+ terminationMessagePath: /dev/termination-log
+ volumeMounts:
+ - name: system-redis-storage
+ mountPath: "/var/lib/redis/data"
+ - name: redis-config
+ mountPath: /etc/redis.conf
+ subPath: redis.conf
+ readinessProbe:
+ exec:
+ command:
+ - "container-entrypoint"
+ - "bash"
+ - "-c"
+ - "redis-cli set liveness-probe \"`date`\" | grep OK"
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ timeoutSeconds: 5
+ livenessProbe:
+ tcpSocket:
+ port: 6379
+ initialDelaySeconds: 10
+ periodSeconds: 5
+ volumes:
+ - name: system-redis-storage
+ persistentVolumeClaim:
+ claimName: system-redis-storage
+ - name: redis-config
+ configMap:
+ name: redis-config
+ items:
+ - key: redis.conf
+ path: redis.conf
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: system-sphinx
+ spec:
+ ports:
+ - port: 9306
+ protocol: TCP
+ targetPort: 9306
+ name: sphinx
+ selector:
+ name: system-sphinx
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: system-sphinx
+ spec:
+ replicas: 1
+ selector:
+ name: system-sphinx
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: system-sphinx
+ spec:
+ volumes:
+ - name: system-sphinx-database
+ emptyDir: {}
+ containers:
+ - args:
+ - rake
+ - 'openshift:thinking_sphinx:start'
+ volumeMounts:
+ - name: system-sphinx-database
+ mountPath: "/opt/system/db/sphinx"
+ env:
+ - name: RAILS_ENV
+ value: production
+ - name: DATABASE_URL
+ value: "mysql2://root:${MYSQL_ROOT_PASSWORD}@system-mysql/${MYSQL_DATABASE}"
+ - name: THINKING_SPHINX_ADDRESS
+ value: 0.0.0.0
+ - name: THINKING_SPHINX_CONFIGURATION_FILE
+ value: "db/sphinx/production.conf"
+ - name: THINKING_SPHINX_PID_FILE
+ value: db/sphinx/searchd.pid
+ - name: DELTA_INDEX_INTERVAL
+ value: '5'
+ - name: FULL_REINDEX_INTERVAL
+ value: '60'
+ image: 3scale-amp20/system:1.0-2
+ imagePullPolicy: IfNotPresent
+ name: system-sphinx
+ livenessProbe:
+ tcpSocket:
+ port: 9306
+ initialDelaySeconds: 60
+ periodSeconds: 10
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: system-memcache
+ spec:
+ ports:
+ - port: 11211
+ protocol: TCP
+ targetPort: 11211
+ name: memcache
+ selector:
+ name: system-memcache
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: system-memcache
+ spec:
+ replicas: 1
+ selector:
+ name: system-memcache
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: system-memcache
+ spec:
+ containers:
+ - args:
+ env:
+ image: 3scale-amp20/memcached:1.4.15-7
+ imagePullPolicy: IfNotPresent
+ name: memcache
+ readinessProbe:
+ exec:
+ command:
+ - "sh"
+ - "-c"
+ - "echo version | nc $HOSTNAME 11211 | grep VERSION"
+ initialDelaySeconds: 10
+ periodSeconds: 30
+ timeoutSeconds: 5
+ livenessProbe:
+ tcpSocket:
+ port: 11211
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ command:
+ - "memcached"
+ - "-m"
+ - "64"
+ ports:
+ - containerPort: 11211
+ protocol: TCP
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: system-provider-admin-route
+ labels:
+ app: system-route
+ spec:
+ host: ${TENANT_NAME}-admin.${WILDCARD_DOMAIN}
+ to:
+ kind: Service
+ name: system-provider
+ port:
+ targetPort: http
+ tls:
+ termination: edge
+ insecureEdgeTerminationPolicy: Allow
+
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: backend-route
+ labels:
+ app: system-route
+ spec:
+ host: backend-${TENANT_NAME}.${WILDCARD_DOMAIN}
+ to:
+ kind: Service
+ name: backend-listener
+ port:
+ targetPort: http
+ tls:
+ termination: edge
+ insecureEdgeTerminationPolicy: Allow
+
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: system-developer-route
+ labels:
+ app: system-route
+ spec:
+ host: ${TENANT_NAME}.${WILDCARD_DOMAIN}
+ to:
+ kind: Service
+ name: system-developer
+ port:
+ targetPort: http
+ tls:
+ termination: edge
+ insecureEdgeTerminationPolicy: Allow
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: apicast-staging
+ spec:
+ replicas: 1
+ selector:
+ deploymentconfig: apicast-staging
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 1800
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ deploymentconfig: apicast-staging
+ spec:
+ containers:
+ - env:
+ - name: THREESCALE_PORTAL_ENDPOINT
+ value: http://${APICAST_ACCESS_TOKEN}@system-provider:3000
+ - name: APICAST_CONFIGURATION_LOADER
+ value: "lazy"
+ - name: APICAST_CONFIGURATION_CACHE
+ value: "0"
+ - name: THREESCALE_DEPLOYMENT_ENV
+ value: "sandbox"
+ - name: APICAST_MANAGEMENT_API
+ value: "${APICAST_MANAGEMENT_API}"
+ - name: BACKEND_ENDPOINT_OVERRIDE
+ value: http://backend-listener:3000
+ - name: OPENSSL_VERIFY
+ value: '${APICAST_OPENSSL_VERIFY}'
+ - name: APICAST_RESPONSE_CODES
+ value: '${APICAST_RESPONSE_CODES}'
+ - name: REDIS_URL
+ value: "redis://system-redis:6379/2"
+ image: 3scale-amp20/apicast-gateway:1.0-3
+ imagePullPolicy: IfNotPresent
+ name: apicast-staging
+ livenessProbe:
+ httpGet:
+ path: /status/live
+ port: 8090
+ initialDelaySeconds: 10
+ timeoutSeconds: 5
+ periodSeconds: 10
+ readinessProbe:
+ httpGet:
+ path: /status/ready
+ port: 8090
+ initialDelaySeconds: 15
+ timeoutSeconds: 5
+ periodSeconds: 30
+ ports:
+ - containerPort: 8080
+ protocol: TCP
+ - containerPort: 8090
+ protocol: TCP
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: apicast-staging
+ spec:
+ ports:
+ - name: gateway
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: management
+ port: 8090
+ protocol: TCP
+ targetPort: 8090
+ selector:
+ deploymentconfig: apicast-staging
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: apicast-production
+ spec:
+ replicas: 1
+ selector:
+ deploymentconfig: apicast-production
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 1800
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ deploymentconfig: apicast-production
+ spec:
+ containers:
+ - env:
+ - name: THREESCALE_PORTAL_ENDPOINT
+ value: "http://${APICAST_ACCESS_TOKEN}@system-provider:3000"
+ - name: APICAST_CONFIGURATION_LOADER
+ value: "boot"
+ - name: APICAST_CONFIGURATION_CACHE
+ value: "300"
+ - name: THREESCALE_DEPLOYMENT_ENV
+ value: "production"
+ - name: APICAST_MANAGEMENT_API
+ value: "${APICAST_MANAGEMENT_API}"
+ - name: BACKEND_ENDPOINT_OVERRIDE
+ value: http://backend-listener:3000
+ - name: OPENSSL_VERIFY
+ value: '${APICAST_OPENSSL_VERIFY}'
+ - name: APICAST_RESPONSE_CODES
+ value: '${APICAST_RESPONSE_CODES}'
+ - name: REDIS_URL
+ value: "redis://system-redis:6379/1"
+ image: 3scale-amp20/apicast-gateway:1.0-3
+ imagePullPolicy: IfNotPresent
+ name: apicast-production
+ livenessProbe:
+ httpGet:
+ path: /status/live
+ port: 8090
+ initialDelaySeconds: 10
+ timeoutSeconds: 5
+ periodSeconds: 10
+ readinessProbe:
+ httpGet:
+ path: /status/ready
+ port: 8090
+ initialDelaySeconds: 15
+ timeoutSeconds: 5
+ periodSeconds: 30
+ ports:
+ - containerPort: 8080
+ protocol: TCP
+ - containerPort: 8090
+ protocol: TCP
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: apicast-production
+ spec:
+ ports:
+ - name: gateway
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: management
+ port: 8090
+ protocol: TCP
+ targetPort: 8090
+ selector:
+ deploymentconfig: apicast-production
+
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: api-apicast-staging-route
+ labels:
+ app: apicast-staging
+ spec:
+ host: api-${TENANT_NAME}-apicast-staging.${WILDCARD_DOMAIN}
+ to:
+ kind: Service
+ name: apicast-staging
+ port:
+ targetPort: gateway
+ tls:
+ termination: edge
+ insecureEdgeTerminationPolicy: Allow
+
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: api-apicast-production-route
+ labels:
+ app: apicast-production
+ spec:
+ host: api-${TENANT_NAME}-apicast-production.${WILDCARD_DOMAIN}
+ to:
+ kind: Service
+ name: apicast-production
+ port:
+ targetPort: gateway
+ tls:
+ termination: edge
+ insecureEdgeTerminationPolicy: Allow
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: system-app
+ spec:
+ replicas: 1
+ selector:
+ name: system-app
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ pre:
+ failurePolicy: Retry
+ execNewPod:
+ containerName: system-provider
+ command:
+ - bash
+ - -c
+ - bundle exec rake boot openshift:deploy
+ env: *base_env
+ volumes:
+ - system-storage
+ post:
+ failurePolicy: Abort
+ execNewPod:
+ containerName: system-provider
+ command:
+ - bash
+ - -c
+ - bundle exec rake boot openshift:post_deploy
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: system-app
+ spec:
+ containers:
+ - args:
+ env: *base_env
+ image: 3scale-amp20/system:1.0-2
+ imagePullPolicy: IfNotPresent
+ command: ['env', 'TENANT_MODE=provider', 'PORT=3000', 'container-entrypoint', 'bundle', 'exec', 'unicorn', '-c', 'config/unicorn.rb']
+ name: system-provider
+ livenessProbe:
+ timeoutSeconds: 10
+ initialDelaySeconds: 20
+ tcpSocket:
+ port: provider
+ periodSeconds: 10
+ readinessProbe:
+ httpGet:
+ path: /check.txt
+ port: provider
+ scheme: HTTP
+ httpHeaders:
+ - name: X-Forwarded-Proto
+ value: https
+ initialDelaySeconds: 30
+ timeoutSeconds: 10
+ periodSeconds: 30
+ ports:
+ - containerPort: 3000
+ protocol: TCP
+ name: provider
+ volumeMounts:
+ - name: system-storage
+ mountPath: /opt/system/public/system
+ - args:
+ env: *base_env
+ image: 3scale-amp20/system:1.0-2
+ command: ['env', 'TENANT_MODE=developer', 'PORT=3001', 'container-entrypoint', 'bundle', 'exec', 'unicorn', '-c', 'config/unicorn.rb']
+ imagePullPolicy: IfNotPresent
+ name: system-developer
+ livenessProbe:
+ timeoutSeconds: 10
+ initialDelaySeconds: 20
+ tcpSocket:
+ port: developer
+ periodSeconds: 10
+ readinessProbe:
+ httpGet:
+ path: /check.txt
+ port: developer
+ scheme: HTTP
+ httpHeaders:
+ - name: X-Forwarded-Proto
+ value: https
+ initialDelaySeconds: 30
+ timeoutSeconds: 10
+ periodSeconds: 30
+ ports:
+ - containerPort: 3001
+ protocol: TCP
+ name: developer
+ volumeMounts:
+ - name: system-storage
+ mountPath: /opt/system/public/system
+ readOnly: true
+ volumes:
+ - name: system-storage
+ persistentVolumeClaim:
+ claimName: system-storage
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: system-resque
+ spec:
+ replicas: 1
+ selector:
+ name: system-resque
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: system-resque
+ spec:
+ containers:
+ - args:
+ - 'rake'
+ - 'resque:work'
+ - 'QUEUE=*'
+ env: *base_env
+ image: 3scale-amp20/system:1.0-2
+ imagePullPolicy: IfNotPresent
+ name: system-resque
+ volumeMounts:
+ - name: system-storage
+ mountPath: /opt/system/public/system
+ - args:
+ - 'rake'
+ - 'resque:scheduler'
+ - 'QUEUE=*'
+ env: *base_env
+ image: 3scale-amp20/system:1.0-2
+ imagePullPolicy: IfNotPresent
+ name: system-scheduler
+ volumes:
+ - name: system-storage
+ persistentVolumeClaim:
+ claimName: system-storage
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: system-sidekiq
+ spec:
+ replicas: 1
+ selector:
+ name: system-sidekiq
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: system-sidekiq
+ spec:
+ containers:
+ - args:
+ - rake
+ - sidekiq:worker
+ env: *base_env
+ image: 3scale-amp20/system:1.0-2
+ imagePullPolicy: IfNotPresent
+ name: system-sidekiq
+ volumeMounts:
+ - name: system-storage
+ mountPath: /opt/system/public/system
+ volumes:
+ - name: system-storage
+ persistentVolumeClaim:
+ claimName: system-storage
+ triggers:
+ - type: ConfigChange
+
+
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: 'system-mysql'
+ spec:
+ strategy:
+ type: Recreate
+ triggers:
+ - type: ConfigChange
+ replicas: 1
+ selector:
+ name: 'system-mysql'
+ template:
+ metadata:
+ labels:
+ name: 'system-mysql'
+ spec:
+ containers:
+ - name: system-mysql
+ image: ${MYSQL_IMAGE}
+ ports:
+ - containerPort: 3306
+ protocol: TCP
+ resources:
+ limits:
+ memory: 2Gi
+ requests:
+ cpu: '1'
+ memory: 1Gi
+ readinessProbe:
+ timeoutSeconds: 5
+ initialDelaySeconds: 10
+ periodSeconds: 30
+ exec:
+ command:
+ - /bin/sh
+ - '-i'
+ - '-c'
+ - MYSQL_PWD="$MYSQL_PASSWORD" mysql -h 127.0.0.1 -u $MYSQL_USER -D $MYSQL_DATABASE -e 'SELECT 1'
+ livenessProbe:
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ tcpSocket:
+ port: 3306
+ env:
+ - name: MYSQL_USER
+ value: ${MYSQL_USER}
+ - name: MYSQL_PASSWORD
+ value: ${MYSQL_PASSWORD}
+ - name: MYSQL_DATABASE
+ value: ${MYSQL_DATABASE}
+ - name: MYSQL_ROOT_PASSWORD
+ value: ${MYSQL_ROOT_PASSWORD}
+ - name: MYSQL_LOWER_CASE_TABLE_NAMES
+ value: "1"
+ volumeMounts:
+ - name: 'mysql-storage'
+ mountPath: /var/lib/mysql/data
+ imagePullPolicy: IfNotPresent
+ volumes:
+ - name: 'mysql-storage'
+ persistentVolumeClaim:
+ claimName: 'mysql-storage'
+- kind: ConfigMap
+ apiVersion: v1
+ metadata:
+ name: redis-config
+ data:
+ redis.conf: |
+ protected-mode no
+
+ port 6379
+
+ timeout 0
+ tcp-keepalive 300
+
+ daemonize no
+ supervised no
+
+ loglevel notice
+
+ databases 16
+
+ save 900 1
+ save 300 10
+ save 60 10000
+
+ stop-writes-on-bgsave-error yes
+
+ rdbcompression yes
+ rdbchecksum yes
+
+ dbfilename dump.rdb
+
+ slave-serve-stale-data yes
+ slave-read-only yes
+
+ repl-diskless-sync no
+ repl-disable-tcp-nodelay no
+
+ appendonly yes
+ appendfilename "appendonly.aof"
+ appendfsync everysec
+ no-appendfsync-on-rewrite no
+ auto-aof-rewrite-percentage 100
+ auto-aof-rewrite-min-size 64mb
+ aof-load-truncated yes
+
+ lua-time-limit 5000
+
+ activerehashing no
+
+ aof-rewrite-incremental-fsync yes
+ dir /var/lib/redis/data
+
+- kind: ConfigMap
+
+ apiVersion: v1
+ metadata:
+ name: smtp
+ data:
+ address: ""
+ username: ""
+ password: ""
+ domain: ""
+ port: ""
+ authentication: ""
+ openssl.verify.mode: ""
+
+parameters:
+- name: AMP_RELEASE
+ description: "AMP release tag."
+ value: 2.0.0-CR2-redhat-1
+ required: true
+- name: ADMIN_PASSWORD
+ required: true
+ generate: expression
+ from: "[a-z0-9]{8}"
+- name: ADMIN_USERNAME
+ value: admin
+ required: true
+- name: APICAST_ACCESS_TOKEN
+ required: true
+ generate: expression
+ from: "[a-z0-9]{8}"
+ description: "Read Only Access Token that is APIcast going to use to download its configuration."
+- name: ADMIN_ACCESS_TOKEN
+ required: false
+ generate: expression
+ from: "[a-z0-9]{16}"
+ description: "Admin Access Token with all scopes and write permissions for API access."
+- name: WILDCARD_DOMAIN
+ description: Root domain for the wildcard routes. E.g. example.com will generate 3scale-admin.example.com.
+ required: true
+- name: TENANT_NAME
+ description: "Tenant name under the root that Admin UI will be available with -admin suffix."
+ required: true
+ value: "3scale"
+- name: MYSQL_USER
+ displayName: MySQL User
+ description: Username for the MySQL user that will be used to access the database.
+ value: "mysql"
+ required: true
+- name: MYSQL_PASSWORD
+ displayName: MySQL Password
+ description: Password for the MySQL user.
+ generate: expression
+ from: "[a-z0-9]{8}"
+ required: true
+- name: MYSQL_DATABASE
+ displayName: MySQL Database Name
+ description: Name of the MySQL database accessed.
+ value: "system"
+ required: true
+- name: MYSQL_ROOT_PASSWORD
+ displayName: MySQL Root Password
+ description: Password for the MySQL root user.
+ generate: expression
+ from: "[a-z0-9]{8}"
+ required: true
+- name: SYSTEM_BACKEND_USERNAME
+ description: Internal 3scale API username for internal 3scale API auth.
+ value: "3scale_api_user"
+ required: true
+- name: SYSTEM_BACKEND_PASSWORD
+ description: Internal 3scale API password for internal 3scale API auth.
+ generate: expression
+ from: "[a-z0-9]{8}"
+ required: true
+- name: REDIS_IMAGE
+ description: Redis image to use
+ required: true
+ value: rhscl/redis-32-rhel7:3.2-5.7
+- name: MYSQL_IMAGE
+ description: MySQL image to use
+ required: true
+ value: rhscl/mysql-56-rhel7:5.6-13.14
+- name: SYSTEM_BACKEND_SHARED_SECRET
+ description: Shared secret to import events from backend to system.
+ generate: expression
+ from: "[a-z0-9]{8}"
+ required: true
+- name: SYSTEM_APP_SECRET_KEY_BASE
+ description: System application secret key base
+ generate: expression
+ from: "[a-f0-9]{128}"
+ required: true
+- name: APICAST_MANAGEMENT_API
+ description: "Scope of the APIcast Management API. Can be disabled, status or debug. At least status required for health checks."
+ required: false
+ value: "status"
+- name: APICAST_OPENSSL_VERIFY
+ description: "Turn on/off the OpenSSL peer verification when downloading the configuration. Can be set to true/false."
+ required: false
+ value: "false"
+- name: APICAST_RESPONSE_CODES
+ description: "Enable logging response codes in APIcast."
+ value: "true"
+ required: false
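
One detail worth calling out in amp.yml: `base_env: &base_env` at the top of the file is an ordinary YAML anchor, and every `env: *base_env` in the DeploymentConfigs above simply expands to that shared list. A minimal standalone sketch of the mechanism follows; the container name is hypothetical and the snippet is illustrative only, not part of this patch.

# YAML anchor/alias sketch -- the same mechanism amp.yml uses to share one env list.
base_env: &base_env        # "&base_env" names this list so it can be reused
- name: RAILS_ENV
  value: "production"

containers:
- name: example-app        # hypothetical container, for illustration only
  env: *base_env           # "*base_env" expands to the exact list defined above
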
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/apicast-gateway-template.yml b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/apicast-gateway-template.yml
index 34f5fcbcc..e69de29bb 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/apicast-gateway-template.yml
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/apicast-gateway-template.yml
@@ -1,149 +0,0 @@
-apiVersion: v1
-kind: Template
-metadata:
- creationTimestamp: null
- name: 3scale-gateway
- annotations:
- description: "3scale API Gateway"
- iconClass: "icon-load-balancer"
- tags: "api,gateway,3scale"
-objects:
-- apiVersion: v1
- kind: DeploymentConfig
- metadata:
- name: ${THREESCALE_GATEWAY_NAME}
- spec:
- replicas: 2
- selector:
- deploymentconfig: ${THREESCALE_GATEWAY_NAME}
- strategy:
- resources: {}
- rollingParams:
- intervalSeconds: 1
- maxSurge: 25%
- maxUnavailable: 25%
- timeoutSeconds: 600
- updatePeriodSeconds: 1
- type: Rolling
- template:
- metadata:
- labels:
- deploymentconfig: ${THREESCALE_GATEWAY_NAME}
- spec:
- containers:
- - env:
- - name: THREESCALE_PORTAL_ENDPOINT
- valueFrom:
- secretKeyRef:
- name: ${THREESCALE_PORTAL_ENDPOINT_SECRET}
- key: password
- - name: THREESCALE_CONFIG_FILE
- value: ${THREESCALE_CONFIG_FILE}
- - name: RESOLVER
- value: ${RESOLVER}
- - name: APICAST_SERVICES
- value: ${APICAST_SERVICES}
- - name: APICAST_MISSING_CONFIGURATION
- value: ${MISSING_CONFIGURATION}
- - name: APICAST_LOG_LEVEL
- value: ${APICAST_LOG_LEVEL}
- - name: APICAST_PATH_ROUTING_ENABLED
- value: ${PATH_ROUTING}
- - name: APICAST_RESPONSE_CODES
- value: ${RESPONSE_CODES}
- - name: APICAST_REQUEST_LOGS
- value: ${REQUEST_LOGS}
- - name: APICAST_RELOAD_CONFIG
- value: ${APICAST_RELOAD_CONFIG}
- image: ${THREESCALE_GATEWAY_IMAGE}
- imagePullPolicy: Always
- name: ${THREESCALE_GATEWAY_NAME}
- livenessProbe:
- httpGet:
- path: /status/live
- port: 8090
- initialDelaySeconds: 10
- timeoutSeconds: 1
- readinessProbe:
- httpGet:
- path: /status/ready
- port: 8090
- initialDelaySeconds: 15
- timeoutSeconds: 1
- ports:
- - containerPort: 8080
- protocol: TCP
- resources: {}
- terminationMessagePath: /dev/termination-log
- dnsPolicy: ClusterFirst
- restartPolicy: Always
- securityContext: {}
- terminationGracePeriodSeconds: 30
- triggers:
- - type: ConfigChange
- status: {}
-- apiVersion: v1
- kind: Service
- metadata:
- creationTimestamp: null
- name: ${THREESCALE_GATEWAY_NAME}
- spec:
- ports:
- - name: 8080-tcp
- port: 8080
- protocol: TCP
- targetPort: 8080
- selector:
- deploymentconfig: ${THREESCALE_GATEWAY_NAME}
- sessionAffinity: None
- type: ClusterIP
- status:
- loadBalancer: {}
-parameters:
-- description: "Name of the secret containing the THREESCALE_PORTAL_ENDPOINT with the access-token or provider key"
- value: threescale-portal-endpoint-secret
- name: THREESCALE_PORTAL_ENDPOINT_SECRET
- required: true
-- description: "Path to saved JSON file with configuration for the gateway. Has to be injected to the docker image as read only volume."
- value:
- name: THREESCALE_CONFIG_FILE
- required: false
-- description: "Name for the 3scale API Gateway"
- value: threescalegw
- name: THREESCALE_GATEWAY_NAME
- required: true
-- description: "Docker image to use."
- value: 'rhamp10/apicast-gateway:1.0.0-4'
- name: THREESCALE_GATEWAY_IMAGE
- required: true
-- description: "DNS Resolver for openresty, if empty it will be autodiscovered"
- value:
- name: RESOLVER
- required: false
-- description: "Subset of services to run. Use comma separated list of service ids (eg. 42,1337)"
- value:
- name: APICAST_SERVICES
- required: false
-- description: "What to do on missing or invalid configuration. Allowed values are: log, exit."
- value: exit
- required: false
- name: MISSING_CONFIGURATION
-- description: "Log level. One of the following: debug, info, notice, warn, error, crit, alert, or emerg."
- name: APICAST_LOG_LEVEL
- required: false
-- description: "Enable path routing. Experimental feature."
- name: PATH_ROUTING
- required: false
- value: "false"
-- description: "Enable traffic logging to 3scale. Includes whole request and response."
- value: "false"
- name: REQUEST_LOGS
- required: false
-- description: "Enable logging response codes to 3scale."
- value: "false"
- name: RESPONSE_CODES
- required: false
-- description: "Reload config on every request"
- value: "false"
- name: APICAST_RELOAD_CONFIG
- required: false
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/apicast.yml b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/apicast.yml
new file mode 100644
index 000000000..8e8051c0b
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/apicast.yml
@@ -0,0 +1,157 @@
+apiVersion: v1
+kind: Template
+metadata:
+ name: 3scale-gateway
+ annotations:
+ description: "3scale API Gateway"
+ iconClass: "icon-load-balancer"
+ tags: "api,gateway,3scale"
+objects:
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${APICAST_NAME}"
+ spec:
+ replicas: 2
+ selector:
+ deploymentconfig: "${APICAST_NAME}"
+ strategy:
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ deploymentconfig: "${APICAST_NAME}"
+ spec:
+ containers:
+ - env:
+ - name: THREESCALE_PORTAL_ENDPOINT
+ valueFrom:
+ secretKeyRef:
+ name: "${CONFIGURATION_URL_SECRET}"
+ key: password
+ - name: THREESCALE_CONFIG_FILE
+ value: "${CONFIGURATION_FILE_PATH}"
+ - name: THREESCALE_DEPLOYMENT_ENV
+ value: "${DEPLOYMENT_ENVIRONMENT}"
+ - name: RESOLVER
+ value: "${RESOLVER}"
+ - name: APICAST_SERVICES
+ value: "${SERVICES_LIST}"
+ - name: APICAST_CONFIGURATION_LOADER
+ value: "${CONFIGURATION_LOADER}"
+ - name: APICAST_LOG_LEVEL
+ value: "${LOG_LEVEL}"
+ - name: APICAST_PATH_ROUTING_ENABLED
+ value: "${PATH_ROUTING}"
+ - name: APICAST_RESPONSE_CODES
+ value: "${RESPONSE_CODES}"
+ - name: APICAST_CONFIGURATION_CACHE
+ value: "${CONFIGURATION_CACHE}"
+ - name: REDIS_URL
+ value: "${REDIS_URL}"
+ - name: APICAST_MANAGEMENT_API
+ value: "${MANAGEMENT_API}"
+ - name: OPENSSL_VERIFY
+ value: "${OPENSSL_VERIFY}"
+ image: 3scale-amp20/apicast-gateway:1.0-3
+ imagePullPolicy: IfNotPresent
+ name: "${APICAST_NAME}"
+ livenessProbe:
+ httpGet:
+ path: /status/live
+ port: management
+ initialDelaySeconds: 10
+ timeoutSeconds: 1
+ readinessProbe:
+ httpGet:
+ path: /status/ready
+ port: management
+ initialDelaySeconds: 15
+ timeoutSeconds: 1
+ ports:
+ - name: proxy
+ containerPort: 8080
+ protocol: TCP
+ - name: management
+ containerPort: 8090
+ protocol: TCP
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${APICAST_NAME}"
+ spec:
+ ports:
+ - name: proxy
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: management
+ port: 8090
+ protocol: TCP
+ targetPort: 8090
+ selector:
+ deploymentconfig: "${APICAST_NAME}"
+
+parameters:
+- name: AMP_RELEASE
+ description: "AMP release tag."
+ value: 2.0.0-CR2-redhat-1
+ required: true
+- description: "Name of the secret containing the THREESCALE_PORTAL_ENDPOINT with the access-token or provider key"
+ value: apicast-configuration-url-secret
+ name: CONFIGURATION_URL_SECRET
+ required: true
+- description: "Path to saved JSON file with configuration for the gateway. Has to be injected to the docker image as read only volume."
+ value:
+ name: CONFIGURATION_FILE_PATH
+ required: false
+- description: "Deployment environment. Can be sandbox or production."
+ value: production
+ name: DEPLOYMENT_ENVIRONMENT
+ required: true
+- description: "Name for the 3scale API Gateway"
+ value: apicast
+ name: APICAST_NAME
+ required: true
+- description: "DNS Resolver for openresty, if empty it will be autodiscovered"
+ value:
+ name: RESOLVER
+ required: false
+- description: "Subset of services to run. Use comma separated list of service ids (eg. 42,1337)"
+ value:
+ name: SERVICES_LIST
+ required: false
+- name: CONFIGURATION_LOADER
+ description: "When to load configuration. If on gateway start or incoming request. Allowed values are: lazy, boot."
+ value: boot
+ required: false
+- description: "Log level. One of the following: debug, info, notice, warn, error, crit, alert, or emerg."
+ name: LOG_LEVEL
+ required: false
+- description: "Enable path routing. Experimental feature."
+ name: PATH_ROUTING
+ required: false
+ value: "false"
+- description: "Enable logging response codes to 3scale."
+ value: "false"
+ name: RESPONSE_CODES
+ required: false
+- name: CONFIGURATION_CACHE
+ description: "For how long to cache the downloaded configuration in seconds. Can be left empty, 0 or greater than 60."
+ value: ""
+ required: false
+- description: "Redis URL. Required for OAuth2 integration. ex: redis://PASSWORD@127.0.0.1:6379/0"
+ name: REDIS_URL
+ required: false
+- name: MANAGEMENT_API
+ description: "Scope of the Management API. Can be disabled, status or debug. At least status required for health checks."
+ required: false
+ value: "status"
+- name: OPENSSL_VERIFY
+ description: "Turn on/off the OpenSSL peer verification. Can be set to true/false."
+ required: true
+ value: "false"
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql-persistent.json
index 81ae63416..ec335daa0 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql-persistent.json
@@ -147,6 +147,9 @@
}
},
"spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
"triggers": [
{
"type": "ImageChange",
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql.json
index 7a285dba8..6304586dd 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql.json
@@ -147,6 +147,9 @@
}
},
"spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
"triggers": [
{
"type": "ImageChange",
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql-persistent.json
index 9f982c286..152bf1c7c 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql-persistent.json
@@ -148,7 +148,7 @@
},
"spec": {
"strategy": {
- "type": "Rolling"
+ "type": "Recreate"
},
"triggers": [
{
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql.json
index 7bee85ddd..f3b5f97f3 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql.json
@@ -148,7 +148,7 @@
},
"spec": {
"strategy": {
- "type": "Rolling"
+ "type": "Recreate"
},
"triggers": [
{
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb-persistent.json
index 6ee999cb1..c570ca5d5 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb-persistent.json
@@ -154,7 +154,7 @@
},
"spec": {
"strategy": {
- "type": "Rolling"
+ "type": "Recreate"
},
"triggers": [
{
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb.json
index 5c177a7e0..161f1582e 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb.json
@@ -154,7 +154,7 @@
},
"spec": {
"strategy": {
- "type": "Rolling"
+ "type": "Recreate"
},
"triggers": [
{
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/pvc.yml b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/pvc.yml
new file mode 100644
index 000000000..0bbb8e625
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/pvc.yml
@@ -0,0 +1,49 @@
+apiVersion: v1
+kind: Template
+metadata:
+ name: "amp-pvc"
+objects:
+
+- apiVersion: "v1"
+ kind: "PersistentVolumeClaim"
+ metadata:
+ name: "system-storage"
+ spec:
+ accessModes:
+ - "ReadWriteMany"
+ resources:
+ requests:
+ storage: "100Mi"
+
+- apiVersion: "v1"
+ kind: "PersistentVolumeClaim"
+ metadata:
+ name: "mysql-storage"
+ spec:
+ accessModes:
+ - "ReadWriteOnce"
+ resources:
+ requests:
+ storage: "1Gi"
+
+- apiVersion: "v1"
+ kind: "PersistentVolumeClaim"
+ metadata:
+ name: "system-redis-storage"
+ spec:
+ accessModes:
+ - "ReadWriteOnce"
+ resources:
+ requests:
+ storage: "1Gi"
+
+- apiVersion: "v1"
+ kind: "PersistentVolumeClaim"
+ metadata:
+ name: "backend-redis-storage"
+ spec:
+ accessModes:
+ - "ReadWriteOnce"
+ resources:
+ requests:
+ storage: "1Gi"
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/wildcard.yml b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/wildcard.yml
new file mode 100644
index 000000000..00dedecd5
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/wildcard.yml
@@ -0,0 +1,158 @@
+apiVersion: v1
+kind: Template
+metadata:
+ name: "amp-apicast-wildcard-router"
+objects:
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: apicast-router
+ spec:
+ replicas: 1
+ selector:
+ deploymentconfig: apicast-router
+ strategy:
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ deploymentconfig: apicast-router
+ spec:
+ volumes:
+ - name: apicast-router-config
+ configMap:
+ name: apicast-router-config
+ items:
+ - key: router.conf
+ path: router.conf
+ containers:
+ - env:
+ - name: APICAST_CONFIGURATION_LOADER
+ value: "lazy"
+ - name: APICAST_CONFIGURATION_CACHE
+ value: "0"
+ image: 3scale-amp20/apicast-gateway:1.0-3
+ imagePullPolicy: IfNotPresent
+ name: apicast-router
+ command: ['bin/apicast']
+ livenessProbe:
+ tcpSocket:
+ port: router
+ initialDelaySeconds: 5
+ timeoutSeconds: 5
+ periodSeconds: 10
+ readinessProbe:
+ httpGet:
+ path: /status/ready
+ port: management
+ initialDelaySeconds: 5
+ timeoutSeconds: 5
+ periodSeconds: 30
+ volumeMounts:
+ - name: apicast-router-config
+ mountPath: /opt/app-root/src/sites.d/
+ readOnly: true
+ ports:
+ - containerPort: 8082
+ name: router
+ protocol: TCP
+ - containerPort: 8090
+ name: management
+ protocol: TCP
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: apicast-router
+ spec:
+ ports:
+ - name: router
+ port: 80
+ protocol: TCP
+ targetPort: router
+ selector:
+ deploymentconfig: apicast-router
+
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: apicast-router-config
+ data:
+ router.conf: |-
+ upstream wildcard {
+ server 0.0.0.1:1;
+
+ balancer_by_lua_block {
+ local round_robin = require 'resty.balancer.round_robin'
+ local balancer = round_robin.new()
+ local peers = balancer:peers(ngx.ctx.apicast)
+
+ local peer, err = balancer:set_peer(peers)
+
+ if not peer then
+ ngx.status = ngx.HTTP_SERVICE_UNAVAILABLE
+ ngx.log(ngx.ERR, "failed to set current backend peer: ", err)
+ ngx.exit(ngx.status)
+ end
+ }
+
+ keepalive 1024;
+ }
+
+ server {
+ listen 8082;
+ server_name ~-(?<apicast>apicast-(staging|production))\.;
+ access_log /dev/stdout combined;
+
+ location / {
+ access_by_lua_block {
+ local resolver = require('resty.resolver'):instance()
+ local servers = resolver:get_servers(ngx.var.apicast, { port = 8080 })
+
+ if #servers == 0 then
+ ngx.status = ngx.HTTP_BAD_GATEWAY
+ ngx.exit(ngx.HTTP_OK)
+ end
+
+ ngx.ctx.apicast = servers
+ }
+ proxy_http_version 1.1;
+ proxy_pass $scheme://wildcard;
+ proxy_set_header Host $host;
+ proxy_set_header Connection "";
+ }
+ }
+
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: apicast-wildcard-router
+ labels:
+ app: apicast-wildcard-router
+ spec:
+ host: apicast-${TENANT_NAME}.${WILDCARD_DOMAIN}
+ to:
+ kind: Service
+ name: apicast-router
+ port:
+ targetPort: router
+ wildcardPolicy: Subdomain
+ tls:
+ termination: edge
+ insecureEdgeTerminationPolicy: Allow
+
+parameters:
+- name: AMP_RELEASE
+ description: "AMP release tag."
+ value: 2.0.0-CR2-redhat-1
+ required: true
+- name: WILDCARD_DOMAIN
+ description: Root domain for the wildcard routes. E.g. example.com will generate 3scale-admin.example.com.
+ required: true
+- name: TENANT_NAME
+ description: "Domain name under the root that Admin UI will be available with -admin suffix."
+ required: true
+ value: "3scale"
diff --git a/roles/openshift_health_checker/openshift_checks/memory_availability.py b/roles/openshift_health_checker/openshift_checks/memory_availability.py
index 8b1a58ef4..f4e31065f 100644
--- a/roles/openshift_health_checker/openshift_checks/memory_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/memory_availability.py
@@ -1,6 +1,9 @@
# pylint: disable=missing-docstring
from openshift_checks import OpenShiftCheck, get_var
+MIB = 2**20
+GIB = 2**30
+
class MemoryAvailability(OpenShiftCheck):
"""Check that recommended memory is available."""
@@ -11,10 +14,12 @@ class MemoryAvailability(OpenShiftCheck):
# Values taken from the official installation documentation:
# https://docs.openshift.org/latest/install_config/install/prerequisites.html#system-requirements
recommended_memory_bytes = {
- "masters": 16 * 10**9,
- "nodes": 8 * 10**9,
- "etcd": 8 * 10**9,
+ "masters": 16 * GIB,
+ "nodes": 8 * GIB,
+ "etcd": 8 * GIB,
}
+ # https://access.redhat.com/solutions/3006511 physical RAM is partly reserved from memtotal
+ memtotal_adjustment = 1 * GIB
@classmethod
def is_active(cls, task_vars):
@@ -25,21 +30,21 @@ class MemoryAvailability(OpenShiftCheck):
def run(self, tmp, task_vars):
group_names = get_var(task_vars, "group_names")
- total_memory_bytes = get_var(task_vars, "ansible_memtotal_mb") * 10**6
+ total_memory_bytes = get_var(task_vars, "ansible_memtotal_mb") * MIB
recommended_min = max(self.recommended_memory_bytes.get(name, 0) for name in group_names)
- configured_min = int(get_var(task_vars, "openshift_check_min_host_memory_gb", default=0)) * 10**9
+ configured_min = float(get_var(task_vars, "openshift_check_min_host_memory_gb", default=0)) * GIB
min_memory_bytes = configured_min or recommended_min
- if total_memory_bytes < min_memory_bytes:
+ if total_memory_bytes + self.memtotal_adjustment < min_memory_bytes:
return {
'failed': True,
'msg': (
- 'Available memory ({available:.1f} GB) '
- 'below recommended value ({recommended:.1f} GB)'
+ 'Available memory ({available:.1f} GiB) is too far '
+ 'below recommended value ({recommended:.1f} GiB)'
).format(
- available=float(total_memory_bytes) / 10**9,
- recommended=float(min_memory_bytes) / 10**9,
+ available=float(total_memory_bytes) / GIB,
+ recommended=float(min_memory_bytes) / GIB,
),
}
diff --git a/roles/openshift_health_checker/test/memory_availability_test.py b/roles/openshift_health_checker/test/memory_availability_test.py
index 1db203854..4fbaea0a9 100644
--- a/roles/openshift_health_checker/test/memory_availability_test.py
+++ b/roles/openshift_health_checker/test/memory_availability_test.py
@@ -37,6 +37,11 @@ def test_is_active(group_names, is_active):
2000, # too low for recommended but not for configured
),
(
+ ['nodes'],
+ 2, # configure threshold where adjustment pushes it over
+ 1900,
+ ),
+ (
['etcd'],
0,
8200,
@@ -65,38 +70,44 @@ def test_succeeds_with_recommended_memory(group_names, configured_min, ansible_m
['masters'],
0,
0,
- ['0.0 GB'],
+ ['0.0 GiB'],
),
(
['nodes'],
0,
100,
- ['0.1 GB'],
+ ['0.1 GiB'],
+ ),
+ (
+ ['nodes'],
+ 24, # configure higher threshold
+ 20 * 1024, # enough to meet recommended but not configured
+ ['20.0 GiB'],
),
(
['nodes'],
24, # configure higher threshold
- 20000, # enough to meet recommended but not configured
- ['20.0 GB'],
+ 22 * 1024, # not enough for adjustment to push over threshold
+ ['22.0 GiB'],
),
(
['etcd'],
0,
- 7000,
- ['7.0 GB'],
+ 6 * 1024,
+ ['6.0 GiB'],
),
(
['etcd', 'masters'],
0,
- 9000, # enough memory for etcd, not enough for a master
- ['9.0 GB'],
+ 9 * 1024, # enough memory for etcd, not enough for a master
+ ['9.0 GiB'],
),
(
['nodes', 'masters'],
0,
# enough memory for a node, not enough for a master
- 11000,
- ['11.0 GB'],
+ 11 * 1024,
+ ['11.0 GiB'],
),
])
def test_fails_with_insufficient_memory(group_names, configured_min, ansible_memtotal_mb, extra_words):
diff --git a/roles/openshift_hosted/tasks/router/router.yml b/roles/openshift_hosted/tasks/router/router.yml
index e75e3b16f..192afc87a 100644
--- a/roles/openshift_hosted/tasks/router/router.yml
+++ b/roles/openshift_hosted/tasks/router/router.yml
@@ -37,7 +37,7 @@
cafile: "{{ openshift_master_config_dir ~ '/ca.crt' }}"
# End Block
- when: openshift_hosted_router_create_certificate
+ when: openshift_hosted_router_create_certificate | bool
- name: Get the certificate contents for router
copy:
@@ -46,7 +46,7 @@
src: "{{ item }}"
with_items: "{{ openshift_hosted_routers | oo_collect(attribute='certificate') |
oo_select_keys_from_list(['keyfile', 'certfile', 'cafile']) }}"
- when: not openshift_hosted_router_create_certificate
+ when: not openshift_hosted_router_create_certificate | bool
- name: Create the router service account(s)
oc_serviceaccount:
diff --git a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
index 8fe02444e..8bf98ba41 100644
--- a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
@@ -103,9 +103,9 @@ parameters:
- description: 'Specify "registry/repository" prefix for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", set prefix "registry.access.redhat.com/openshift3/"'
name: IMAGE_PREFIX
value: "registry.access.redhat.com/openshift3/"
- - description: 'Specify image version; e.g. for "registry.access.redhat.com/openshift3/registry-console:3.6", set version "3.6"'
+ - description: 'Specify image version; e.g. for "registry.access.redhat.com/openshift3/registry-console:v3.6", set version "v3.6"'
name: IMAGE_VERSION
- value: "3.6"
+ value: "v3.6"
- description: "The public URL for the Openshift OAuth Provider, e.g. https://openshift.example.com:8443"
name: OPENSHIFT_OAUTH_PROVIDER_URL
required: true
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
index 3c410eff2..0c60ef6fd 100644
--- a/roles/openshift_logging/README.md
+++ b/roles/openshift_logging/README.md
@@ -124,3 +124,34 @@ Elasticsearch OPS too, if using an OPS cluster:
- `openshift_logging_es_ops_ca_ext`: The location of the CA cert for the cert
Elasticsearch uses for the external TLS server cert (default is the internal
CA)
+
+### mux - secure_forward listener service
+- `openshift_logging_use_mux`: Default `False`. If this is `True`, a service
+ called `mux` will be deployed. This service will act as a Fluentd
+ secure_forward forwarder for the node agent Fluentd daemonsets running in the
+ cluster. This can be used to reduce the number of connections to the
+ OpenShift API server, by using `mux` and configuring each node Fluentd to
+ send raw logs to mux and turn off the k8s metadata plugin.
+- `openshift_logging_mux_allow_external`: Default `False`. If this is `True`,
+ the `mux` service will be deployed, and it will be configured to allow
+ Fluentd clients running outside of the cluster to send logs using
+ secure_forward. This allows OpenShift logging to be used as a central
+ logging service for clients other than OpenShift, or other OpenShift
+ clusters.
+- `openshift_logging_use_mux_client`: Default `False`. If this is `True`, the
+ node agent Fluentd services will be configured to send logs to the mux
+ service rather than directly to Elasticsearch.
+- `openshift_logging_mux_hostname`: Default is "mux." +
+ `openshift_master_default_subdomain`. This is the hostname *external*
+ clients will use to connect to mux, and will be used in the TLS server cert
+ subject.
+- `openshift_logging_mux_port`: 24284
+- `openshift_logging_mux_cpu_limit`: 500m
+- `openshift_logging_mux_memory_limit`: 1Gi
+- `openshift_logging_mux_default_namespaces`: Default `["mux-undefined"]` - the
+ first value in the list is the namespace to use for undefined projects,
+ followed by any additional namespaces to create by default - users will
+ typically not need to set this
+- `openshift_logging_mux_namespaces`: Default `[]` - additional namespaces to
+ create for _external_ mux clients to associate with their logs - users will
+ need to set this
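
Taken together, the variables documented above are typically set as inventory/group variables. A minimal sketch with illustrative values follows; the hostname and the extra namespace are placeholders, not values mandated by this patch.

# group_vars sketch -- illustrative values only.
openshift_logging_use_mux: True
openshift_logging_mux_allow_external: True              # accept secure_forward from outside the cluster
openshift_logging_use_mux_client: True                  # node Fluentd forwards to mux instead of Elasticsearch
openshift_logging_mux_hostname: "mux.apps.example.com"  # used in the TLS server cert subject
openshift_logging_mux_port: 24284
openshift_logging_mux_namespaces: ["external-app-logs"] # namespaces created for external client logs
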
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index 837c54067..573cbdd09 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -160,8 +160,13 @@ openshift_logging_use_mux: "{{ openshift_logging_mux_allow_external | default(Fa
openshift_logging_use_mux_client: False
openshift_logging_mux_hostname: "{{ 'mux.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
openshift_logging_mux_port: 24284
-openshift_logging_mux_cpu_limit: 100m
-openshift_logging_mux_memory_limit: 512Mi
+openshift_logging_mux_cpu_limit: 500m
+openshift_logging_mux_memory_limit: 1Gi
+# the namespace to use for undefined projects should come first, followed by any
+# additional namespaces to create by default - users will typically not need to set this
+openshift_logging_mux_default_namespaces: ["mux-undefined"]
+# extra namespaces to create for mux clients - users will need to set this
+openshift_logging_mux_namespaces: []
# following can be uncommented to provide values for configmaps -- take care when providing file contents as it may cause your cluster to not operate correctly
#es_logging_contents:
diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml
index 7169c4036..040356e3d 100644
--- a/roles/openshift_logging/tasks/generate_certs.yaml
+++ b/roles/openshift_logging/tasks/generate_certs.yaml
@@ -124,7 +124,7 @@
- system.logging.mux
loop_control:
loop_var: node_name
- when: openshift_logging_use_mux
+ when: openshift_logging_use_mux | bool
- name: Generate PEM cert for Elasticsearch external route
include: generate_pems.yaml component={{node_name}}
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index f2d757294..dde76b142 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -57,6 +57,9 @@
- set_fact: es_indices=[]
when: openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count == 0
+- set_fact: openshift_logging_es_pvc_prefix="logging-es"
+ when: openshift_logging_es_pvc_prefix == ""
+
# We don't allow scaling down of ES nodes currently
- include_role:
name: openshift_logging_elasticsearch
@@ -66,7 +69,7 @@
openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix ~ '-' ~ item.2 if item.1 is none else item.1 }}"
openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}"
- openshift_logging_elasticsearch_storage_type: "{{ 'pvc' if openshift_logging_es_pvc_dynamic | bool else 'emptydir' }}"
+ openshift_logging_elasticsearch_storage_type: "{{ 'pvc' if ( openshift_logging_es_pvc_dynamic | bool or openshift_hosted_logging_storage_kind | default('') == 'nfs') else 'emptydir' }}"
openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}"
openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
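A hedged illustration of the new conditional above: the expression is copied from the hunk, but the task names, the `example_storage_type` fact, and the variable values are editorial examples, not part of the patch. With dynamic PVCs disabled and the hosted logging storage kind set to NFS, the expression now resolves to 'pvc':

    # a minimal sketch, not part of the patch: shows how the conditional resolves
    - name: Show how the storage type conditional resolves (illustrative only)
      set_fact:
        example_storage_type: "{{ 'pvc' if (openshift_logging_es_pvc_dynamic | bool or openshift_hosted_logging_storage_kind | default('') == 'nfs') else 'emptydir' }}"
      vars:
        # example inputs only; the real values come from the inventory
        openshift_logging_es_pvc_dynamic: False
        openshift_hosted_logging_storage_kind: nfs

    - name: Print the result ('pvc' here, because the storage kind is nfs)
      debug:
        var: example_storage_type

With both inputs unset or false, the expression still falls back to 'emptydir' as before; the change only adds the NFS case.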
@@ -84,7 +87,7 @@
openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix }}-{{ item | int + openshift_logging_facts.elasticsearch.deploymentconfigs | count - 1 }}"
openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}"
- openshift_logging_elasticsearch_storage_type: "{{ 'pvc' if openshift_logging_es_pvc_dynamic | bool else 'emptydir' }}"
+ openshift_logging_elasticsearch_storage_type: "{{ 'pvc' if ( openshift_logging_es_pvc_dynamic | bool or openshift_hosted_logging_storage_kind | default('') == 'nfs') else 'emptydir' }}"
openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}"
openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
@@ -110,7 +113,7 @@
openshift_logging_elasticsearch_ops_deployment: true
openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}"
- #openshift_logging_elasticsearch_storage_type: "{{ }}"
+ openshift_logging_elasticsearch_storage_type: "{{ 'pvc' if ( openshift_logging_es_pvc_dynamic | bool or openshift_hosted_logging_storage_kind | default('') == 'nfs') else 'emptydir' }}"
openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}"
openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
@@ -131,7 +134,7 @@
openshift_logging_elasticsearch_ops_deployment: true
openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}"
- openshift_logging_elasticsearch_storage_type: "{{ 'pvc' if openshift_logging_es_pvc_dynamic | bool else 'emptydir' }}"
+ openshift_logging_elasticsearch_storage_type: "{{ 'pvc' if ( openshift_logging_es_pvc_dynamic | bool or openshift_hosted_logging_storage_kind | default('') == 'nfs') else 'emptydir' }}"
openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}"
openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
@@ -191,6 +194,8 @@
vars:
generated_certs_dir: "{{openshift.common.config_base}}/logging"
openshift_logging_curator_namespace: "{{ openshift_logging_namespace }}"
+ openshift_logging_curator_es_host: "{{ openshift_logging_es_host }}"
+ openshift_logging_curator_es_port: "{{ openshift_logging_es_port }}"
openshift_logging_curator_master_url: "{{ openshift_logging_master_url }}"
openshift_logging_curator_image_prefix: "{{ openshift_logging_image_prefix }}"
openshift_logging_curator_image_version: "{{ openshift_logging_image_version }}"
@@ -201,6 +206,8 @@
vars:
generated_certs_dir: "{{openshift.common.config_base}}/logging"
openshift_logging_curator_ops_deployment: true
+ openshift_logging_curator_es_host: "{{ openshift_logging_es_ops_host }}"
+ openshift_logging_curator_es_port: "{{ openshift_logging_es_ops_port }}"
openshift_logging_curator_namespace: "{{ openshift_logging_namespace }}"
openshift_logging_curator_master_url: "{{ openshift_logging_master_url }}"
openshift_logging_curator_image_prefix: "{{ openshift_logging_image_prefix }}"
diff --git a/roles/openshift_logging_curator/templates/curator.j2 b/roles/openshift_logging_curator/templates/curator.j2
index f8b84861f..1bf9b9de2 100644
--- a/roles/openshift_logging_curator/templates/curator.j2
+++ b/roles/openshift_logging_curator/templates/curator.j2
@@ -89,9 +89,6 @@ spec:
- name: config
mountPath: /etc/curator/settings
readOnly: true
- - name: elasticsearch-storage
- mountPath: /elasticsearch/persistent
- readOnly: true
volumes:
- name: certs
secret:
@@ -99,5 +96,3 @@ spec:
- name: config
configMap:
name: logging-curator
- - name: elasticsearch-storage
- emptyDir: {}
diff --git a/roles/openshift_logging_mux/defaults/main.yml b/roles/openshift_logging_mux/defaults/main.yml
index 8aaa28706..10fa4372c 100644
--- a/roles/openshift_logging_mux/defaults/main.yml
+++ b/roles/openshift_logging_mux/defaults/main.yml
@@ -9,8 +9,8 @@ openshift_logging_mux_namespace: logging
### Common settings
openshift_logging_mux_nodeselector: "{{ openshift_hosted_logging_mux_nodeselector_label | default('') | map_from_pairs }}"
-openshift_logging_mux_cpu_limit: 100m
-openshift_logging_mux_memory_limit: 512Mi
+openshift_logging_mux_cpu_limit: 500m
+openshift_logging_mux_memory_limit: 1Gi
openshift_logging_mux_replicas: 1
@@ -26,9 +26,14 @@ openshift_logging_mux_use_journal: "{{ openshift_hosted_logging_use_journal | de
openshift_logging_mux_journal_source: "{{ openshift_hosted_logging_journal_source | default('') }}"
openshift_logging_mux_journal_read_from_head: "{{ openshift_hosted_logging_journal_read_from_head | default('') }}"
-openshift_logging_mux_allow_external: false
+openshift_logging_mux_allow_external: False
openshift_logging_mux_hostname: "{{ 'mux.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
openshift_logging_mux_port: 24284
+# the namespace to use for undefined projects should come first, followed by any
+# additional namespaces to create by default - users will typically not need to set this
+openshift_logging_mux_default_namespaces: ["mux-undefined"]
+# extra namespaces to create for mux clients - users will need to set this
+openshift_logging_mux_namespaces: []
openshift_logging_mux_app_client_cert: /etc/fluent/keys/cert
openshift_logging_mux_app_client_key: /etc/fluent/keys/key
diff --git a/roles/openshift_logging_mux/tasks/main.yaml b/roles/openshift_logging_mux/tasks/main.yaml
index 432cab9e9..54af40070 100644
--- a/roles/openshift_logging_mux/tasks/main.yaml
+++ b/roles/openshift_logging_mux/tasks/main.yaml
@@ -130,16 +130,14 @@
selector:
component: mux
provider: openshift
- # pending #4091
- #labels:
- #- logging-infra: 'support'
+ labels:
+ logging-infra: 'support'
ports:
- name: mux-forward
port: "{{ openshift_logging_mux_port }}"
targetPort: "mux-forward"
- # pending #4091
- # externalIPs:
- # - "{{ ansible_eth0.ipv4.address }}"
+ external_ips:
+ - "{{ ansible_eth0.ipv4.address }}"
when: openshift_logging_mux_allow_external | bool
- name: Set logging-mux service for internal communication
@@ -150,9 +148,8 @@
selector:
component: mux
provider: openshift
- # pending #4091
- #labels:
- #- logging-infra: 'support'
+ labels:
+ logging-infra: 'support'
ports:
- name: mux-forward
port: "{{ openshift_logging_mux_port }}"
@@ -190,6 +187,13 @@
- "{{ tempdir }}/templates/logging-mux-dc.yaml"
delete_after: true
+- name: Add mux namespaces
+ oc_project:
+ state: present
+ name: "{{ item }}"
+ node_selector: ""
+ with_items: "{{ openshift_logging_mux_namespaces | union(openshift_logging_mux_default_namespaces) }}"
+
- name: Delete temp directory
file:
name: "{{ tempdir }}"
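As a quick, hedged sketch of what the `union` in the "Add mux namespaces" task produces (the literal lists are editorial examples, not the role's values):

    # a minimal sketch, not part of the patch: the literal lists are examples
    - name: Show the combined namespace list the oc_project loop receives
      debug:
        msg: "{{ ['external-app'] | union(['mux-undefined']) }}"
      # prints ['external-app', 'mux-undefined']; each entry then gets its own
      # oc_project call with an empty node selector, as in the task above

Duplicates are collapsed by `union`, so listing "mux-undefined" in openshift_logging_mux_namespaces as well would not create the project twice.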
diff --git a/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2 b/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2
index 889317847..fc82f49b1 100644
--- a/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2
+++ b/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2
@@ -30,6 +30,7 @@ spec:
{% endif %}
containers:
- image: "{{ openshift_metrics_image_prefix }}metrics-cassandra:{{ openshift_metrics_image_version }}"
+ imagePullPolicy: Always
name: hawkular-cassandra-{{ node }}
ports:
- name: cql-port
diff --git a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
index 401db4e58..9a9363075 100644
--- a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
+++ b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
@@ -25,6 +25,7 @@ spec:
{% endif %}
containers:
- image: {{openshift_metrics_image_prefix}}metrics-hawkular-metrics:{{openshift_metrics_image_version}}
+ imagePullPolicy: Always
name: hawkular-metrics
ports:
- name: http-endpoint
diff --git a/roles/openshift_metrics/templates/heapster.j2 b/roles/openshift_metrics/templates/heapster.j2
index ab998c2fb..d8c7763ea 100644
--- a/roles/openshift_metrics/templates/heapster.j2
+++ b/roles/openshift_metrics/templates/heapster.j2
@@ -27,6 +27,7 @@ spec:
containers:
- name: heapster
image: {{openshift_metrics_image_prefix}}metrics-heapster:{{openshift_metrics_image_version}}
+ imagePullPolicy: Always
ports:
- containerPort: 8082
name: "http-endpoint"
diff --git a/roles/openshift_node_upgrade/tasks/restart.yml b/roles/openshift_node_upgrade/tasks/restart.yml
index e576228ba..508eb9358 100644
--- a/roles/openshift_node_upgrade/tasks/restart.yml
+++ b/roles/openshift_node_upgrade/tasks/restart.yml
@@ -5,6 +5,14 @@
# - openshift.common.hostname
# - openshift.master.api_port
+# NOTE: This is needed to make sure we are using the correct set
+# of systemd unit files. The RPMs lay down defaults but
+# the install/upgrade may override them in /etc/systemd/system/.
+# NOTE: We don't use the systemd module as some versions of the module
+# require a service to be part of the call.
+- name: Reload systemd to ensure latest unit files
+ command: systemctl daemon-reload
+
- name: Restart docker
service:
name: "{{ openshift.docker.service_name }}"
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index b35a3fa3c..16792388f 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -30,7 +30,8 @@
- set_fact:
openshift_release: "{{ openshift_release | string }}"
- when: openshift_release is defined
+ when:
+ - openshift_release is defined
# Verify that the image tag is in a valid format
- when:
@@ -106,7 +107,11 @@
fail:
msg: "OCP rpm version {{ openshift_rpm_version }} is different from OCP image version {{ openshift_version }}"
# Both versions have the same string representation
- when: openshift_rpm_version != openshift_version
+ when:
+ - openshift_rpm_version != openshift_version
+ # if openshift_pkg_version or openshift_image_tag is defined, the user explicitly allows the rpm and docker image versions to differ
+ - openshift_pkg_version is not defined
+ - openshift_image_tag is not defined
when:
- is_containerized | bool
- not is_atomic | bool
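In other words, explicitly pinning either variable is treated as permission for the rpm and image versions to diverge. A hedged inventory sketch, with version strings that are illustrative only and not taken from the patch:

    # a minimal sketch, not part of the patch; version strings are illustrative
    # defining either variable skips the rpm/image mismatch check above
    openshift_image_tag: "v3.5.5.31"
    openshift_pkg_version: "-3.5.5.31"

With neither variable set, the play still fails when openshift_rpm_version and openshift_version disagree on a containerized, non-atomic host.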