summaryrefslogtreecommitdiffstats
diff options
context:
space:
mode:
-rw-r--r--.tito/packages/openshift-ansible2
-rw-r--r--Dockerfile26
-rw-r--r--openshift-ansible.spec30
-rw-r--r--playbooks/adhoc/uninstall.yml8
-rw-r--r--roles/lib_openshift_api/build/ansible/router.py142
-rwxr-xr-xroles/lib_openshift_api/build/generate.py5
-rw-r--r--roles/lib_openshift_api/build/src/base.py65
-rw-r--r--roles/lib_openshift_api/build/src/obj.py2
-rw-r--r--roles/lib_openshift_api/build/src/router.py152
-rw-r--r--roles/lib_openshift_api/build/src/secret.py4
-rwxr-xr-xroles/lib_openshift_api/build/test/router.yml79
-rw-r--r--roles/lib_openshift_api/library/oadm_router.py807
-rw-r--r--roles/lib_openshift_api/library/oc_edit.py65
-rw-r--r--roles/lib_openshift_api/library/oc_obj.py67
-rw-r--r--roles/lib_openshift_api/library/oc_secret.py69
-rw-r--r--roles/lib_zabbix/library/zbx_user.py2
-rw-r--r--roles/openshift_docker_facts/tasks/main.yml2
-rwxr-xr-xroles/openshift_facts/library/openshift_facts.py70
-rw-r--r--roles/openshift_master/templates/atomic-openshift-master.j22
-rw-r--r--roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j22
-rw-r--r--roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j22
-rw-r--r--roles/openshift_master/templates/docker/master.docker.service.j22
-rw-r--r--roles/openshift_master/templates/master.yaml.v1.j222
-rw-r--r--roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j22
-rw-r--r--roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j22
-rw-r--r--roles/openshift_master_facts/tasks/main.yml7
-rw-r--r--roles/openshift_node/tasks/main.yml2
-rw-r--r--roles/openshift_node/templates/openshift.docker.node.service2
-rw-r--r--roles/openshift_storage_nfs/README.md13
-rw-r--r--roles/openshift_storage_nfs/meta/main.yml2
30 files changed, 1524 insertions, 133 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 3ad40eefa..23c40682f 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.0.72-1 ./
+3.0.75-1 ./
diff --git a/Dockerfile b/Dockerfile
index 02ab51680..70f6f8a18 100644
--- a/Dockerfile
+++ b/Dockerfile
@@ -1,17 +1,23 @@
FROM rhel7
-MAINTAINER Aaron Weitekamp <aweiteka@redhat.com>
+MAINTAINER Troy Dawson <tdawson@redhat.com>
-RUN yum -y install http://dl.fedoraproject.org/pub/epel/epel-release-latest-7.noarch.rpm
+LABEL Name="openshift3/installer"
+LABEL Vendor="Red Hat" License=GPLv2+
+LABEL Version="v3.1.1.901"
+LABEL Release="6"
+LABEL BZComponent="aos3-installation-docker"
+LABEL Architecture="x86_64"
-# Not sure if all of these packages are necessary
-# only git and ansible are known requirements
-RUN yum install -y --enablerepo rhel-7-server-extras-rpms net-tools bind-utils git ansible pyOpenSSL
+RUN INSTALL_PKGS="atomic-openshift-utils" && \
+ yum install -y --enablerepo=rhel-7-server-ose-3.2-rpms $INSTALL_PKGS && \
+ rpm -V $INSTALL_PKGS && \
+ yum clean all
-ADD ./ /opt/openshift-ansible/
+# Expect user to mount a workdir for container output (installer.cfg, hosts inventory, ansible log)
+VOLUME /var/lib/openshift-installer/
+WORKDIR /var/lib/openshift-installer/
-ENTRYPOINT ["/usr/bin/ansible-playbook"]
+RUN mkdir -p /var/lib/openshift-installer/
-CMD ["/opt/openshift-ansible/playbooks/byo/config.yml"]
-
-LABEL RUN docker run -it --rm --privileged --net=host -v ~/.ssh:/root/.ssh -v /etc/ansible:/etc/ansible --name NAME -e NAME=NAME -e IMAGE=IMAGE IMAGE
+ENTRYPOINT ["/usr/bin/atomic-openshift-installer", "-c", "/var/lib/openshift-installer/installer.cfg", "--ansible-log-path", "/var/lib/openshift-installer/ansible.log"]
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 269422801..845455444 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -5,7 +5,7 @@
}
Name: openshift-ansible
-Version: 3.0.72
+Version: 3.0.75
Release: 1%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
@@ -279,6 +279,34 @@ Atomic OpenShift Utilities includes
%changelog
+* Thu Apr 07 2016 Troy Dawson <tdawson@redhat.com> 3.0.75-1
+- First attempt at oadm router module (kwoodson@redhat.com)
+- Remove openshift_common dep from openshift_storage_nfs (abutcher@redhat.com)
+- Add cloudprovider config dir to docker options. (abutcher@redhat.com)
+- Check for kind in cloudprovider facts prior to accessing.
+ (abutcher@redhat.com)
+
+* Wed Apr 06 2016 Brenton Leanhardt <bleanhar@redhat.com> 3.0.74-1
+- Add support for configuring oauth templates. (dgoodwin@redhat.com)
+- Add support for templating master admissionConfig. (dgoodwin@redhat.com)
+
+* Wed Apr 06 2016 Troy Dawson <tdawson@redhat.com> 3.0.73-1
+- Replace unused Dockerfile with one used for official builds.
+ (dgoodwin@redhat.com)
+- Update for zbx_user refresh (kwoodson@redhat.com)
+- Docker 1.9 is actually cool starting in origin 1.1.4 (sdodson@redhat.com)
+- Unmask services (bleanhar@redhat.com)
+- XPAAS v1.3 for OSE 3.2 (sdodson@redhat.com)
+- XPAAS 1.3 content for OSE 3.1 (sdodson@redhat.com)
+- Bug 1322788 - The IMAGE_VERSION wasn't added to atomic-openshift-master-api
+ and atomic-openshift-master-controllers (bleanhar@redhat.com)
+- Bug 1323123 - upgrade failed to containerized OSE on RHEL Host without ose3.2
+ repo (bleanhar@redhat.com)
+- Write inventory to same directory as quick install config.
+ (dgoodwin@redhat.com)
+- Add --gen-inventory command to atomic-openshift-installer.
+ (dgoodwin@redhat.com)
+
* Tue Apr 05 2016 Troy Dawson <tdawson@redhat.com> 3.0.72-1
- when docker is installed, make it 1.8.2 to avoid issues (mwoodson@redhat.com)
- Downgrade to docker 1.8.2 if installing OSE < 3.2 (sdodson@redhat.com)
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index 680964d80..8fb515982 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -53,6 +53,14 @@
- pcsd
failed_when: false
+ - name: unmask services
+ command: systemctl unmask "{{ item }}"
+ changed_when: False
+ failed_when: False
+ with_items:
+ - etcd
+ - firewalld
+
- name: Stop additional atomic services
service: name={{ item }} state=stopped
when: is_containerized | bool
diff --git a/roles/lib_openshift_api/build/ansible/router.py b/roles/lib_openshift_api/build/ansible/router.py
new file mode 100644
index 000000000..3b24c7b5e
--- /dev/null
+++ b/roles/lib_openshift_api/build/ansible/router.py
@@ -0,0 +1,142 @@
+# pylint: skip-file
+
+def main():
+ '''
+    ansible oadm module for router
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', type='str',
+ choices=['present', 'absent']),
+ debug=dict(default=False, type='bool'),
+ namespace=dict(default='default', type='str'),
+ name=dict(default='router', type='str'),
+
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ credentials=dict(default='/etc/origin/master/openshift-router.kubeconfig', type='str'),
+ cert_file=dict(default=None, type='str'),
+ key_file=dict(default=None, type='str'),
+ image=dict(default=None, type='str'), #'openshift3/ose-${component}:${version}'
+ latest_image=dict(default=False, type='bool'),
+ labels=dict(default=None, type='list'),
+ ports=dict(default=['80:80', '443:443'], type='list'),
+ replicas=dict(default=1, type='int'),
+ selector=dict(default=None, type='str'),
+ service_account=dict(default='router', type='str'),
+ router_type=dict(default='haproxy-router', type='str'),
+ host_network=dict(default=True, type='bool'),
+ # external host options
+ external_host=dict(default=None, type='str'),
+ external_host_vserver=dict(default=None, type='str'),
+ external_host_insecure=dict(default=False, type='bool'),
+ external_host_partition_path=dict(default=None, type='str'),
+ external_host_username=dict(default=None, type='str'),
+ external_host_password=dict(default=None, type='str'),
+ external_host_private_key=dict(default=None, type='str'),
+ # Metrics
+ expose_metrics=dict(default=False, type='bool'),
+ metrics_image=dict(default=None, type='str'),
+ # Stats
+ stats_user=dict(default=None, type='str'),
+ stats_password=dict(default=None, type='str'),
+ stats_port=dict(default=1936, type='int'),
+
+ ),
+ mutually_exclusive=[["router_type", "images"]],
+
+ supports_check_mode=True,
+ )
+
+ rconfig = RouterConfig(module.params['name'],
+ module.params['kubeconfig'],
+ {'credentials': {'value': module.params['credentials'], 'include': True},
+ 'default_cert': {'value': None, 'include': True},
+ 'cert_file': {'value': module.params['cert_file'], 'include': False},
+ 'key_file': {'value': module.params['key_file'], 'include': False},
+ 'image': {'value': module.params['image'], 'include': True},
+ 'latest_image': {'value': module.params['latest_image'], 'include': True},
+ 'labels': {'value': module.params['labels'], 'include': True},
+ 'ports': {'value': ','.join(module.params['ports']), 'include': True},
+ 'replicas': {'value': module.params['replicas'], 'include': True},
+ 'selector': {'value': module.params['selector'], 'include': True},
+ 'service_account': {'value': module.params['service_account'], 'include': True},
+ 'router_type': {'value': module.params['router_type'], 'include': False},
+ 'host_network': {'value': module.params['host_network'], 'include': True},
+ 'external_host': {'value': module.params['external_host'], 'include': True},
+ 'external_host_vserver': {'value': module.params['external_host_vserver'],
+ 'include': True},
+ 'external_host_insecure': {'value': module.params['external_host_insecure'],
+ 'include': True},
+ 'external_host_partition_path': {'value': module.params['external_host_partition_path'],
+ 'include': True},
+ 'external_host_username': {'value': module.params['external_host_username'],
+ 'include': True},
+ 'external_host_password': {'value': module.params['external_host_password'],
+ 'include': True},
+ 'external_host_private_key': {'value': module.params['external_host_private_key'],
+ 'include': True},
+ 'expose_metrics': {'value': module.params['expose_metrics'], 'include': True},
+ 'metrics_image': {'value': module.params['metrics_image'], 'include': True},
+ 'stats_user': {'value': module.params['stats_user'], 'include': True},
+ 'stats_password': {'value': module.params['stats_password'], 'include': True},
+ 'stats_port': {'value': module.params['stats_port'], 'include': True},
+ })
+
+
+ ocrouter = Router(rconfig)
+
+ state = module.params['state']
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if not ocrouter.exists():
+ module.exit_json(changed=False, state="absent")
+
+ if module.check_mode:
+ module.exit_json(change=False, msg='Would have performed a delete.')
+
+ api_rval = ocrouter.delete()
+ module.exit_json(changed=True, results=api_rval, state="absent")
+
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not ocrouter.exists():
+
+ if module.check_mode:
+ module.exit_json(change=False, msg='Would have performed a create.')
+
+ api_rval = ocrouter.create()
+
+ module.exit_json(changed=True, results=api_rval, state="present")
+
+ ########
+ # Update
+ ########
+ if not ocrouter.needs_update():
+ module.exit_json(changed=False, state="present")
+
+ if module.check_mode:
+ module.exit_json(change=False, msg='Would have performed an update.')
+
+ api_rval = ocrouter.update()
+
+ if api_rval['returncode'] != 0:
+ module.fail_json(msg=api_rval)
+
+ module.exit_json(changed=True, results=api_rval, state="present")
+
+ module.exit_json(failed=True,
+ changed=False,
+ results='Unknown state passed. %s' % state,
+ state="unknown")
+
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
+# import module snippets. These are required
+from ansible.module_utils.basic import *
+main()
diff --git a/roles/lib_openshift_api/build/generate.py b/roles/lib_openshift_api/build/generate.py
index cf3f61d2c..9fc1986f1 100755
--- a/roles/lib_openshift_api/build/generate.py
+++ b/roles/lib_openshift_api/build/generate.py
@@ -33,6 +33,11 @@ FILES = {'oc_obj.py': ['src/base.py',
'src/edit.py',
'ansible/edit.py',
],
+ 'oadm_router.py': ['src/base.py',
+ '../../lib_yaml_editor/build/src/yedit.py',
+ 'src/router.py',
+ 'ansible/router.py',
+ ],
}
diff --git a/roles/lib_openshift_api/build/src/base.py b/roles/lib_openshift_api/build/src/base.py
index 66831c4e2..257379d92 100644
--- a/roles/lib_openshift_api/build/src/base.py
+++ b/roles/lib_openshift_api/build/src/base.py
@@ -20,12 +20,12 @@ yaml.add_constructor(u'tag:yaml.org,2002:timestamp', timestamp_constructor)
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
- ''' Class to wrap the oc command line tools '''
+ ''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
- ''' Constructor for OpenshiftOC '''
+ ''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = kubeconfig
@@ -58,15 +58,15 @@ class OpenShiftCLI(object):
cmd = ['-n', self.namespace, 'replace', '-f', fname]
if force:
cmd.append('--force')
- return self.oc_cmd(cmd)
+ return self.openshift_cmd(cmd)
def _create(self, fname):
'''return all pods '''
- return self.oc_cmd(['create', '-f', fname, '-n', self.namespace])
+ return self.openshift_cmd(['create', '-f', fname, '-n', self.namespace])
def _delete(self, resource, rname):
'''return all pods '''
- return self.oc_cmd(['delete', resource, rname, '-n', self.namespace])
+ return self.openshift_cmd(['delete', resource, rname, '-n', self.namespace])
def _get(self, resource, rname=None):
'''return a secret by name '''
@@ -74,7 +74,7 @@ class OpenShiftCLI(object):
if rname:
cmd.append(rname)
- rval = self.oc_cmd(cmd, output=True)
+ rval = self.openshift_cmd(cmd, output=True)
# Ensure results are returned in an array
if rval.has_key('items'):
@@ -84,10 +84,15 @@ class OpenShiftCLI(object):
return rval
- def oc_cmd(self, cmd, output=False):
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json'):
'''Base command for oc '''
#cmds = ['/usr/bin/oc', '--config', self.kubeconfig]
- cmds = ['/usr/bin/oc']
+ cmds = []
+ if oadm:
+ cmds = ['/usr/bin/oadm']
+ else:
+ cmds = ['/usr/bin/oc']
+
cmds.extend(cmd)
rval = {}
@@ -105,18 +110,21 @@ class OpenShiftCLI(object):
proc.wait()
stdout = proc.stdout.read()
stderr = proc.stderr.read()
-
rval = {"returncode": proc.returncode,
"results": results,
+ "cmd": ' '.join(cmds),
}
if proc.returncode == 0:
if output:
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.message:
- err = err.message
+ if output_type == 'json':
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as err:
+ if "No JSON object could be decoded" in err.message:
+ err = err.message
+ elif output_type == 'raw':
+ rval['results'] = stdout
if self.verbose:
print stdout
@@ -219,11 +227,13 @@ class Utils(object):
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements
@staticmethod
- def check_def_equal(user_def, result_def, debug=False):
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
@@ -232,11 +242,27 @@ class Utils(object):
# Both are lists
if isinstance(value, list):
if not isinstance(user_def[key], list):
+ if debug:
+ print 'user_def[key] is not a list'
return False
- # lists should be identical
- if value != user_def[key]:
- return False
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print 'sending list - list'
+ print type(values[0])
+ print type(values[1])
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+ if not result:
+ print 'list compare returned false'
+ return False
+
+ elif value != user_def[key]:
+ if debug:
+ print 'value should be identical'
+ print value
+ print user_def[key]
+ return False
# recurse on a dictionary
elif isinstance(value, dict):
@@ -255,10 +281,11 @@ class Utils(object):
print "keys are not equal in dict"
return False
- result = Utils.check_def_equal(user_def[key], value, debug=debug)
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print "dict returned false"
+ print result
return False
# Verify each key, value pair is the same
diff --git a/roles/lib_openshift_api/build/src/obj.py b/roles/lib_openshift_api/build/src/obj.py
index a3ad4b3c4..13aeba8e1 100644
--- a/roles/lib_openshift_api/build/src/obj.py
+++ b/roles/lib_openshift_api/build/src/obj.py
@@ -62,7 +62,7 @@ class OCObject(OpenShiftCLI):
data = Utils.get_resource_file(files[0], content_type)
# if equal then no need. So not equal is True
- return not Utils.check_def_equal(data, objects['results'][0], True)
+ return not Utils.check_def_equal(data, objects['results'][0], skip_keys=None, debug=False)
else:
data = content
diff --git a/roles/lib_openshift_api/build/src/router.py b/roles/lib_openshift_api/build/src/router.py
new file mode 100644
index 000000000..69454d594
--- /dev/null
+++ b/roles/lib_openshift_api/build/src/router.py
@@ -0,0 +1,152 @@
+# pylint: skip-file
+
+import time
+
+class RouterConfig(object):
+ ''' RouterConfig is a DTO for the router. '''
+ def __init__(self, rname, kubeconfig, router_options):
+ self.name = rname
+ self.kubeconfig = kubeconfig
+ self._router_options = router_options
+
+ @property
+ def router_options(self):
+ ''' return router options '''
+ return self._router_options
+
+ def to_option_list(self):
+ ''' return all options as a string'''
+ return RouterConfig.stringify(self.router_options)
+
+ @staticmethod
+ def stringify(options):
+ ''' return hash as list of key value pairs '''
+ rval = []
+ for key, data in options.items():
+ if data['include'] and data['value']:
+ rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+
+ return rval
+
+class Router(OpenShiftCLI):
+ ''' Class to wrap the oc command line tools '''
+ def __init__(self,
+ router_config,
+ verbose=False):
+        ''' Constructor for Router
+
+ a router consists of 3 or more parts
+ - dc/router
+ - svc/router
+ - endpoint/router
+ '''
+ super(Router, self).__init__('default', router_config.kubeconfig, verbose)
+ self.rconfig = router_config
+ self.verbose = verbose
+ self.router_parts = [{'kind': 'dc', 'name': self.rconfig.name},
+ {'kind': 'svc', 'name': self.rconfig.name},
+ #{'kind': 'endpoints', 'name': self.rconfig.name},
+ ]
+ def get(self, filter_kind=None):
+ ''' return the self.router_parts '''
+ rparts = self.router_parts
+ parts = []
+ if filter_kind:
+ rparts = [part for part in self.router_parts if filter_kind == part['kind']]
+
+ for part in rparts:
+ parts.append(self._get(part['kind'], rname=part['name']))
+
+ return parts
+
+ def exists(self):
+        '''return True when all router parts (dc, svc) exist '''
+ parts = self.get()
+ for part in parts:
+ if part['returncode'] != 0:
+ return False
+
+ return True
+
+ def delete(self):
+        '''delete each of the router parts '''
+ parts = []
+ for part in self.router_parts:
+ parts.append(self._delete(part['kind'], part['name']))
+
+ return parts
+
+ def create(self, dryrun=False, output=False, output_type='json'):
+ '''Create a deploymentconfig '''
+ # We need to create the pem file
+ router_pem = '/tmp/router.pem'
+ with open(router_pem, 'w') as rfd:
+ rfd.write(open(self.rconfig.router_options['cert_file']['value']).read())
+ rfd.write(open(self.rconfig.router_options['key_file']['value']).read())
+
+ atexit.register(Utils.cleanup, [router_pem])
+ self.rconfig.router_options['default_cert']['value'] = router_pem
+
+ options = self.rconfig.to_option_list()
+
+ cmd = ['router']
+ cmd.extend(options)
+ if dryrun:
+ cmd.extend(['--dry-run=True', '-o', 'json'])
+
+ results = self.openshift_cmd(cmd, oadm=True, output=output, output_type=output_type)
+
+ return results
+
+ def update(self):
+ '''run update for the router. This performs a delete and then create '''
+ parts = self.delete()
+ if any([part['returncode'] != 0 for part in parts]):
+ return parts
+
+ # Ugly built in sleep here.
+ time.sleep(15)
+
+ return self.create()
+
+ def needs_update(self, verbose=False):
+ ''' check to see if we need to update '''
+ dc_inmem = self.get(filter_kind='dc')[0]
+ if dc_inmem['returncode'] != 0:
+ return dc_inmem
+
+ user_dc = self.create(dryrun=True, output=True, output_type='raw')
+ if user_dc['returncode'] != 0:
+ return user_dc
+
+ # Since the output from oadm_router is returned as raw
+ # we need to parse it. The first line is the stats_password
+ user_dc_results = user_dc['results'].split('\n')
+ # stats_password = user_dc_results[0]
+
+ # Load the string back into json and get the newly created dc
+ user_dc = json.loads('\n'.join(user_dc_results[1:]))['items'][0]
+
+ # Router needs some exceptions.
+ # We do not want to check the autogenerated password for stats admin
+ if not self.rconfig.router_options['stats_password']['value']:
+ for idx, env_var in enumerate(user_dc['spec']['template']['spec']['containers'][0]['env']):
+ if env_var['name'] == 'STATS_PASSWORD':
+ env_var['value'] = \
+ dc_inmem['results'][0]['spec']['template']['spec']['containers'][0]['env'][idx]['value']
+
+ # dry-run doesn't add the protocol to the ports section. We will manually do that.
+ for idx, port in enumerate(user_dc['spec']['template']['spec']['containers'][0]['ports']):
+ if not port.has_key('protocol'):
+ port['protocol'] = 'TCP'
+
+ # These are different when generating
+ skip = ['dnsPolicy',
+ 'terminationGracePeriodSeconds',
+ 'restartPolicy', 'timeoutSeconds',
+ 'livenessProbe', 'readinessProbe',
+ 'terminationMessagePath',
+ 'rollingParams',
+ ]
+
+ return not Utils.check_def_equal(user_dc, dc_inmem['results'][0], skip_keys=skip, debug=verbose)
diff --git a/roles/lib_openshift_api/build/src/secret.py b/roles/lib_openshift_api/build/src/secret.py
index af61dfa01..154716828 100644
--- a/roles/lib_openshift_api/build/src/secret.py
+++ b/roles/lib_openshift_api/build/src/secret.py
@@ -32,7 +32,7 @@ class Secret(OpenShiftCLI):
cmd = ['-n%s' % self.namespace, 'secrets', 'new', self.name]
cmd.extend(secrets)
- return self.oc_cmd(cmd)
+ return self.openshift_cmd(cmd)
def update(self, files, force=False):
'''run update secret
@@ -63,6 +63,6 @@ class Secret(OpenShiftCLI):
cmd = ['-ojson', '-n%s' % self.namespace, 'secrets', 'new', self.name]
cmd.extend(secrets)
- return self.oc_cmd(cmd, output=True)
+ return self.openshift_cmd(cmd, output=True)
diff --git a/roles/lib_openshift_api/build/test/router.yml b/roles/lib_openshift_api/build/test/router.yml
new file mode 100755
index 000000000..7ab192b97
--- /dev/null
+++ b/roles/lib_openshift_api/build/test/router.yml
@@ -0,0 +1,79 @@
+#!/usr/bin/ansible-playbook
+---
+- hosts: "oo_clusterid_mwoodson:&oo_master_primary"
+ gather_facts: no
+ user: root
+
+ tasks:
+ - oadm_router:
+ state: absent
+ credentials: /etc/origin/master/openshift-router.kubeconfig
+ service_account: router
+ replicas: 2
+ namespace: default
+ selector: type=infra
+ cert_file: /etc/origin/master/named_certificates/12ab.mwoodson.openshiftapps.com.crt
+ key_file: /etc/origin/master/named_certificates/12ab.mwoodson.openshiftapps.com.key
+ register: routerout
+
+ - debug: var=routerout
+
+ - pause:
+ seconds: 10
+
+ - oadm_router:
+ credentials: /etc/origin/master/openshift-router.kubeconfig
+ service_account: router
+ replicas: 2
+ namespace: default
+ selector: type=infra
+ cert_file: /etc/origin/master/named_certificates/12ab.mwoodson.openshiftapps.com.crt
+ key_file: /etc/origin/master/named_certificates/12ab.mwoodson.openshiftapps.com.key
+ register: routerout
+
+ - debug: var=routerout
+
+ - pause:
+ seconds: 10
+
+ - oadm_router:
+ credentials: /etc/origin/master/openshift-router.kubeconfig
+ service_account: router
+ replicas: 2
+ namespace: default
+ selector: type=infra
+ cert_file: /etc/origin/master/named_certificates/12ab.mwoodson.openshiftapps.com.crt
+ key_file: /etc/origin/master/named_certificates/12ab.mwoodson.openshiftapps.com.key
+ register: routerout
+
+ - debug: var=routerout
+
+ - pause:
+ seconds: 10
+
+ - oadm_router:
+ credentials: /etc/origin/master/openshift-router.kubeconfig
+ service_account: router
+ replicas: 3
+ namespace: default
+ selector: type=test
+ cert_file: /etc/origin/master/named_certificates/12ab.mwoodson.openshiftapps.com.crt
+ key_file: /etc/origin/master/named_certificates/12ab.mwoodson.openshiftapps.com.key
+ register: routerout
+
+ - debug: var=routerout
+
+ - pause:
+ seconds: 10
+
+ - oadm_router:
+ credentials: /etc/origin/master/openshift-router.kubeconfig
+ service_account: router
+ replicas: 2
+ namespace: default
+ selector: type=infra
+ cert_file: /etc/origin/master/named_certificates/12ab.mwoodson.openshiftapps.com.crt
+ key_file: /etc/origin/master/named_certificates/12ab.mwoodson.openshiftapps.com.key
+ register: routerout
+
+ - debug: var=routerout
diff --git a/roles/lib_openshift_api/library/oadm_router.py b/roles/lib_openshift_api/library/oadm_router.py
new file mode 100644
index 000000000..c6b45c14e
--- /dev/null
+++ b/roles/lib_openshift_api/library/oadm_router.py
@@ -0,0 +1,807 @@
+#!/usr/bin/env python
+# ___ ___ _ _ ___ ___ _ _____ ___ ___
+# / __| __| \| | __| _ \ /_\_ _| __| \
+# | (_ | _|| .` | _|| / / _ \| | | _|| |) |
+# \___|___|_|\_|___|_|_\/_/_\_\_|_|___|___/_ _____
+# | \ / _ \ | \| |/ _ \_ _| | __| \_ _|_ _|
+# | |) | (_) | | .` | (_) || | | _|| |) | | | |
+# |___/ \___/ |_|\_|\___/ |_| |___|___/___| |_|
+'''
+ OpenShiftCLI class that wraps the oc commands in a subprocess
+'''
+
+import atexit
+import json
+import os
+import shutil
+import subprocess
+import re
+
+import yaml
+# This is here because of a bug that causes yaml
+# to incorrectly handle timezone info on timestamps
+def timestamp_constructor(_, node):
+ '''return timestamps as strings'''
+ return str(node.value)
+yaml.add_constructor(u'tag:yaml.org,2002:timestamp', timestamp_constructor)
+
+# pylint: disable=too-few-public-methods
+class OpenShiftCLI(object):
+ ''' Class to wrap the command line tools '''
+ def __init__(self,
+ namespace,
+ kubeconfig='/etc/origin/master/admin.kubeconfig',
+ verbose=False):
+ ''' Constructor for OpenshiftCLI '''
+ self.namespace = namespace
+ self.verbose = verbose
+ self.kubeconfig = kubeconfig
+
+ # Pylint allows only 5 arguments to be passed.
+ # pylint: disable=too-many-arguments
+ def _replace_content(self, resource, rname, content, force=False):
+ ''' replace the current object with the content '''
+ res = self._get(resource, rname)
+ if not res['results']:
+ return res
+
+ fname = '/tmp/%s' % rname
+ yed = Yedit(fname, res['results'][0])
+ changes = []
+ for key, value in content.items():
+ changes.append(yed.put(key, value))
+
+ if any([not change[0] for change in changes]):
+ return {'returncode': 0, 'updated': False}
+
+ yed.write()
+
+ atexit.register(Utils.cleanup, [fname])
+
+ return self._replace(fname, force)
+
+ def _replace(self, fname, force=False):
+ '''return all pods '''
+ cmd = ['-n', self.namespace, 'replace', '-f', fname]
+ if force:
+ cmd.append('--force')
+ return self.openshift_cmd(cmd)
+
+ def _create(self, fname):
+ '''return all pods '''
+ return self.openshift_cmd(['create', '-f', fname, '-n', self.namespace])
+
+ def _delete(self, resource, rname):
+ '''return all pods '''
+ return self.openshift_cmd(['delete', resource, rname, '-n', self.namespace])
+
+ def _get(self, resource, rname=None):
+ '''return a secret by name '''
+ cmd = ['get', resource, '-o', 'json', '-n', self.namespace]
+ if rname:
+ cmd.append(rname)
+
+ rval = self.openshift_cmd(cmd, output=True)
+
+        # Ensure results are returned in an array
+ if rval.has_key('items'):
+ rval['results'] = rval['items']
+ elif not isinstance(rval['results'], list):
+ rval['results'] = [rval['results']]
+
+ return rval
+
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json'):
+        '''Base command for oc/oadm '''
+ #cmds = ['/usr/bin/oc', '--config', self.kubeconfig]
+ cmds = []
+ if oadm:
+ cmds = ['/usr/bin/oadm']
+ else:
+ cmds = ['/usr/bin/oc']
+
+ cmds.extend(cmd)
+
+ rval = {}
+ results = ''
+ err = None
+
+ if self.verbose:
+ print ' '.join(cmds)
+
+ proc = subprocess.Popen(cmds,
+ stdout=subprocess.PIPE,
+ stderr=subprocess.PIPE,
+ env={'KUBECONFIG': self.kubeconfig})
+
+ proc.wait()
+ stdout = proc.stdout.read()
+ stderr = proc.stderr.read()
+ rval = {"returncode": proc.returncode,
+ "results": results,
+ "cmd": ' '.join(cmds),
+ }
+
+ if proc.returncode == 0:
+ if output:
+ if output_type == 'json':
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as err:
+ if "No JSON object could be decoded" in err.message:
+ err = err.message
+ elif output_type == 'raw':
+ rval['results'] = stdout
+
+ if self.verbose:
+ print stdout
+ print stderr
+ print
+
+ if err:
+ rval.update({"err": err,
+ "stderr": stderr,
+ "stdout": stdout,
+ "cmd": cmds
+ })
+
+ else:
+ rval.update({"stderr": stderr,
+ "stdout": stdout,
+ "results": {},
+ })
+
+ return rval
+
+class Utils(object):
+ ''' utilities for openshiftcli modules '''
+ @staticmethod
+ def create_file(rname, data, ftype=None):
+ ''' create a file in tmp with name and contents'''
+ path = os.path.join('/tmp', rname)
+ with open(path, 'w') as fds:
+ if ftype == 'yaml':
+ fds.write(yaml.safe_dump(data, default_flow_style=False))
+
+ elif ftype == 'json':
+ fds.write(json.dumps(data))
+ else:
+ fds.write(data)
+
+ # Register cleanup when module is done
+ atexit.register(Utils.cleanup, [path])
+ return path
+
+ @staticmethod
+ def create_files_from_contents(data):
+ '''Turn an array of dict: filename, content into a files array'''
+ files = []
+
+ for sfile in data:
+ path = Utils.create_file(sfile['path'], sfile['content'])
+ files.append(path)
+
+ return files
+
+ @staticmethod
+ def cleanup(files):
+ '''Clean up on exit '''
+ for sfile in files:
+ if os.path.exists(sfile):
+ if os.path.isdir(sfile):
+ shutil.rmtree(sfile)
+ elif os.path.isfile(sfile):
+ os.remove(sfile)
+
+
+ @staticmethod
+ def exists(results, _name):
+ ''' Check to see if the results include the name '''
+ if not results:
+ return False
+
+
+ if Utils.find_result(results, _name):
+ return True
+
+ return False
+
+ @staticmethod
+ def find_result(results, _name):
+ ''' Find the specified result by name'''
+ rval = None
+ for result in results:
+ if result.has_key('metadata') and result['metadata']['name'] == _name:
+ rval = result
+ break
+
+ return rval
+
+ @staticmethod
+ def get_resource_file(sfile, sfile_type='yaml'):
+ ''' return the service file '''
+ contents = None
+ with open(sfile) as sfd:
+ contents = sfd.read()
+
+ if sfile_type == 'yaml':
+ contents = yaml.safe_load(contents)
+ elif sfile_type == 'json':
+ contents = json.loads(contents)
+
+ return contents
+
+ # Disabling too-many-branches. This is a yaml dictionary comparison function
+ # pylint: disable=too-many-branches,too-many-return-statements
+ @staticmethod
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
+ ''' Given a user defined definition, compare it with the results given back by our query. '''
+
+ # Currently these values are autogenerated and we do not need to check them
+ skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
+
+ for key, value in result_def.items():
+ if key in skip:
+ continue
+
+ # Both are lists
+ if isinstance(value, list):
+ if not isinstance(user_def[key], list):
+ if debug:
+ print 'user_def[key] is not a list'
+ return False
+
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print 'sending list - list'
+ print type(values[0])
+ print type(values[1])
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+ if not result:
+ print 'list compare returned false'
+ return False
+
+ elif value != user_def[key]:
+ if debug:
+ print 'value should be identical'
+ print value
+ print user_def[key]
+ return False
+
+ # recurse on a dictionary
+ elif isinstance(value, dict):
+ if not isinstance(user_def[key], dict):
+ if debug:
+ print "dict returned false not instance of dict"
+ return False
+
+ # before passing ensure keys match
+ api_values = set(value.keys()) - set(skip)
+ user_values = set(user_def[key].keys()) - set(skip)
+ if api_values != user_values:
+ if debug:
+ print api_values
+ print user_values
+ print "keys are not equal in dict"
+ return False
+
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
+ if not result:
+ if debug:
+ print "dict returned false"
+ print result
+ return False
+
+ # Verify each key, value pair is the same
+ else:
+ if not user_def.has_key(key) or value != user_def[key]:
+ if debug:
+ print "value not equal; user_def does not have key"
+ print value
+ print user_def[key]
+ return False
+
+ return True
+
+class YeditException(Exception):
+    ''' Exception raised by Yedit for unrecoverable errors,
+        e.g. calling write() without a filename. '''
+    pass
+
+class Yedit(object):
+ ''' Class to modify yaml files '''
+ re_valid_key = r"(((\[-?\d+\])|([a-zA-Z-./]+)).?)+$"
+ re_key = r"(?:\[(-?\d+)\])|([a-zA-Z-./]+)"
+
+ def __init__(self, filename=None, content=None, content_type='yaml'):
+ self.content = content
+ self.filename = filename
+ self.__yaml_dict = content
+ self.content_type = content_type
+ if self.filename and not self.content:
+ self.load(content_type=self.content_type)
+
+ @property
+ def yaml_dict(self):
+ ''' getter method for yaml_dict '''
+ return self.__yaml_dict
+
+ @yaml_dict.setter
+ def yaml_dict(self, value):
+ ''' setter method for yaml_dict '''
+ self.__yaml_dict = value
+
+ @staticmethod
+ def remove_entry(data, key):
+ ''' remove data at location key '''
+ if not (key and re.match(Yedit.re_valid_key, key) and isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = re.findall(Yedit.re_key, key)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for remove
+ # expected list entry
+ if key_indexes[-1][0]:
+ if isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
+ del data[int(key_indexes[-1][0])]
+ return True
+
+ # expected dict entry
+ elif key_indexes[-1][1]:
+ if isinstance(data, dict):
+ del data[key_indexes[-1][1]]
+ return True
+
+ @staticmethod
+ def add_entry(data, key, item=None):
+ ''' Get an item from a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}}
+ key = a.b
+ return c
+ '''
+ if not (key and re.match(Yedit.re_valid_key, key) and isinstance(data, (list, dict))):
+ return None
+
+ curr_data = data
+
+ key_indexes = re.findall(Yedit.re_key, key)
+ for arr_ind, dict_key in key_indexes[:-1]:
+ if dict_key:
+ if isinstance(data, dict) and data.has_key(dict_key):
+ data = data[dict_key]
+ continue
+
+ data[dict_key] = {}
+ data = data[dict_key]
+
+ elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ # process last index for add
+ # expected list entry
+ if key_indexes[-1][0] and isinstance(data, list) and int(key_indexes[-1][0]) <= len(data) - 1:
+ data[int(key_indexes[-1][0])] = item
+
+ # expected dict entry
+ elif key_indexes[-1][1] and isinstance(data, dict):
+ data[key_indexes[-1][1]] = item
+
+ return curr_data
+
+ @staticmethod
+ def get_entry(data, key):
+ ''' Get an item from a dictionary with key notation a.b.c
+ d = {'a': {'b': 'c'}}}
+ key = a.b
+ return c
+ '''
+ if not (key and re.match(Yedit.re_valid_key, key) and isinstance(data, (list, dict))):
+ return None
+
+ key_indexes = re.findall(Yedit.re_key, key)
+ for arr_ind, dict_key in key_indexes:
+ if dict_key and isinstance(data, dict):
+ data = data.get(dict_key, None)
+ elif arr_ind and isinstance(data, list) and int(arr_ind) <= len(data) - 1:
+ data = data[int(arr_ind)]
+ else:
+ return None
+
+ return data
+
+ def write(self):
+ ''' write to file '''
+ if not self.filename:
+ raise YeditException('Please specify a filename.')
+
+ with open(self.filename, 'w') as yfd:
+ yfd.write(yaml.safe_dump(self.yaml_dict, default_flow_style=False))
+
+ def read(self):
+ ''' write to file '''
+ # check if it exists
+ if not self.exists():
+ return None
+
+ contents = None
+ with open(self.filename) as yfd:
+ contents = yfd.read()
+
+ return contents
+
+ def exists(self):
+ ''' return whether file exists '''
+ if os.path.exists(self.filename):
+ return True
+
+ return False
+
+ def load(self, content_type='yaml'):
+ ''' return yaml file '''
+ contents = self.read()
+
+ if not contents:
+ return None
+
+ # check if it is yaml
+ try:
+ if content_type == 'yaml':
+ self.yaml_dict = yaml.load(contents)
+ elif content_type == 'json':
+ self.yaml_dict = json.loads(contents)
+ except yaml.YAMLError as _:
+ # Error loading yaml or json
+ return None
+
+ return self.yaml_dict
+
+ def get(self, key):
+ ''' get a specified key'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key)
+ except KeyError as _:
+ entry = None
+
+ return entry
+
+ def delete(self, key):
+ ''' remove key from a dict'''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key)
+ except KeyError as _:
+ entry = None
+ if not entry:
+ return (False, self.yaml_dict)
+
+ result = Yedit.remove_entry(self.yaml_dict, key)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def put(self, key, value):
+ ''' put key, value into a dict '''
+ try:
+ entry = Yedit.get_entry(self.yaml_dict, key)
+ except KeyError as _:
+ entry = None
+
+ if entry == value:
+ return (False, self.yaml_dict)
+
+ result = Yedit.add_entry(self.yaml_dict, key, value)
+ if not result:
+ return (False, self.yaml_dict)
+
+ return (True, self.yaml_dict)
+
+ def create(self, key, value):
+ ''' create a yaml file '''
+ if not self.exists():
+ self.yaml_dict = {key: value}
+ return (True, self.yaml_dict)
+
+ return (False, self.yaml_dict)
+
+import time
+
+class RouterConfig(object):
+ ''' RouterConfig is a DTO for the router. '''
+ def __init__(self, rname, kubeconfig, router_options):
+ self.name = rname
+ self.kubeconfig = kubeconfig
+ self._router_options = router_options
+
+ @property
+ def router_options(self):
+ ''' return router options '''
+ return self._router_options
+
+ def to_option_list(self):
+ ''' return all options as a string'''
+ return RouterConfig.stringify(self.router_options)
+
+ @staticmethod
+ def stringify(options):
+ ''' return hash as list of key value pairs '''
+ rval = []
+ for key, data in options.items():
+ if data['include'] and data['value']:
+ rval.append('--%s=%s' % (key.replace('_', '-'), data['value']))
+
+ return rval
+
+class Router(OpenShiftCLI):
+    ''' Class to wrap the oc/oadm command line tools for router management '''
+    def __init__(self,
+                 router_config,
+                 verbose=False):
+        ''' Constructor for Router
+
+        a router consists of 3 or more parts
+        - dc/router
+        - svc/router
+        - endpoint/router
+        '''
+        super(Router, self).__init__('default', router_config.kubeconfig, verbose)
+        self.rconfig = router_config
+        self.verbose = verbose
+        # the openshift objects that make up a deployed router
+        self.router_parts = [{'kind': 'dc', 'name': self.rconfig.name},
+                             {'kind': 'svc', 'name': self.rconfig.name},
+                             #{'kind': 'endpoints', 'name': self.rconfig.name},
+                            ]
+    def get(self, filter_kind=None):
+        ''' return the self.router_parts, fetched via oc get;
+            filter_kind restricts the query to one kind (e.g. 'dc') '''
+        rparts = self.router_parts
+        parts = []
+        if filter_kind:
+            rparts = [part for part in self.router_parts if filter_kind == part['kind']]
+
+        for part in rparts:
+            parts.append(self._get(part['kind'], rname=part['name']))
+
+        return parts
+
+    def exists(self):
+        ''' return True only when every router part can be fetched '''
+        parts = self.get()
+        for part in parts:
+            if part['returncode'] != 0:
+                return False
+
+        return True
+
+    def delete(self):
+        ''' delete every router part; returns the list of delete results '''
+        parts = []
+        for part in self.router_parts:
+            parts.append(self._delete(part['kind'], part['name']))
+
+        return parts
+
+    def create(self, dryrun=False, output=False, output_type='json'):
+        '''Create a deploymentconfig via `oadm router`.
+
+        Concatenates the configured cert and key into a single pem file,
+        registers it for cleanup at exit, and passes it as --default-cert.
+        '''
+        # We need to create the pem file
+        # NOTE(review): /tmp/router.pem is a fixed, predictable path writable
+        # by other local users; consider tempfile.mkstemp instead.
+        router_pem = '/tmp/router.pem'
+        with open(router_pem, 'w') as rfd:
+            rfd.write(open(self.rconfig.router_options['cert_file']['value']).read())
+            rfd.write(open(self.rconfig.router_options['key_file']['value']).read())
+
+        atexit.register(Utils.cleanup, [router_pem])
+        self.rconfig.router_options['default_cert']['value'] = router_pem
+
+        options = self.rconfig.to_option_list()
+
+        cmd = ['router']
+        cmd.extend(options)
+        if dryrun:
+            cmd.extend(['--dry-run=True', '-o', 'json'])
+
+        results = self.openshift_cmd(cmd, oadm=True, output=output, output_type=output_type)
+
+        return results
+
+    def update(self):
+        '''run update for the router.  This performs a delete and then create
+
+        Returns the delete results on failure, otherwise the create result.
+        '''
+        parts = self.delete()
+        if any([part['returncode'] != 0 for part in parts]):
+            return parts
+
+        # Ugly built in sleep here.
+        # NOTE(review): fixed delay to let the deleted objects disappear
+        # before recreating; a poll loop would be more robust.
+        time.sleep(15)
+
+        return self.create()
+
+    def needs_update(self, verbose=False):
+        ''' check to see if we need to update; returns a boolean, or the
+            failing result dict when either fetch/dry-run fails '''
+        dc_inmem = self.get(filter_kind='dc')[0]
+        if dc_inmem['returncode'] != 0:
+            return dc_inmem
+
+        user_dc = self.create(dryrun=True, output=True, output_type='raw')
+        if user_dc['returncode'] != 0:
+            return user_dc
+
+        # Since the output from oadm_router is returned as raw
+        # we need to parse it.  The first line is the stats_password
+        # NOTE(review): assumes the dry-run output format is exactly one
+        # password line followed by a json document -- confirm against oadm.
+        user_dc_results = user_dc['results'].split('\n')
+        # stats_password = user_dc_results[0]
+
+        # Load the string back into json and get the newly created dc
+        user_dc = json.loads('\n'.join(user_dc_results[1:]))['items'][0]
+
+        # Router needs some exceptions.
+        # We do not want to check the autogenerated password for stats admin
+        if not self.rconfig.router_options['stats_password']['value']:
+            for idx, env_var in enumerate(user_dc['spec']['template']['spec']['containers'][0]['env']):
+                if env_var['name'] == 'STATS_PASSWORD':
+                    # copy the live password into the generated dc so the
+                    # comparison below ignores the autogenerated one
+                    env_var['value'] = \
+                        dc_inmem['results'][0]['spec']['template']['spec']['containers'][0]['env'][idx]['value']
+
+        # dry-run doesn't add the protocol to the ports section.  We will manually do that.
+        for idx, port in enumerate(user_dc['spec']['template']['spec']['containers'][0]['ports']):
+            if not port.has_key('protocol'):
+                port['protocol'] = 'TCP'
+
+        # These are different when generating
+        skip = ['dnsPolicy',
+                'terminationGracePeriodSeconds',
+                'restartPolicy', 'timeoutSeconds',
+                'livenessProbe', 'readinessProbe',
+                'terminationMessagePath',
+                'rollingParams',
+               ]
+
+        return not Utils.check_def_equal(user_dc, dc_inmem['results'][0], skip_keys=skip, debug=verbose)
+
+def main():
+ '''
+ ansible oc module for secrets
+ '''
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ state=dict(default='present', type='str',
+ choices=['present', 'absent']),
+ debug=dict(default=False, type='bool'),
+ namespace=dict(default='default', type='str'),
+ name=dict(default='router', type='str'),
+
+ kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
+ credentials=dict(default='/etc/origin/master/openshift-router.kubeconfig', type='str'),
+ cert_file=dict(default=None, type='str'),
+ key_file=dict(default=None, type='str'),
+ image=dict(default=None, type='str'), #'openshift3/ose-${component}:${version}'
+ latest_image=dict(default=False, type='bool'),
+ labels=dict(default=None, type='list'),
+ ports=dict(default=['80:80', '443:443'], type='list'),
+ replicas=dict(default=1, type='int'),
+ selector=dict(default=None, type='str'),
+ service_account=dict(default='router', type='str'),
+ router_type=dict(default='haproxy-router', type='str'),
+ host_network=dict(default=True, type='bool'),
+ # external host options
+ external_host=dict(default=None, type='str'),
+ external_host_vserver=dict(default=None, type='str'),
+ external_host_insecure=dict(default=False, type='bool'),
+ external_host_partition_path=dict(default=None, type='str'),
+ external_host_username=dict(default=None, type='str'),
+ external_host_password=dict(default=None, type='str'),
+ external_host_private_key=dict(default=None, type='str'),
+ # Metrics
+ expose_metrics=dict(default=False, type='bool'),
+ metrics_image=dict(default=None, type='str'),
+ # Stats
+ stats_user=dict(default=None, type='str'),
+ stats_password=dict(default=None, type='str'),
+ stats_port=dict(default=1936, type='int'),
+
+ ),
+ mutually_exclusive=[["router_type", "images"]],
+
+ supports_check_mode=True,
+ )
+
+ rconfig = RouterConfig(module.params['name'],
+ module.params['kubeconfig'],
+ {'credentials': {'value': module.params['credentials'], 'include': True},
+ 'default_cert': {'value': None, 'include': True},
+ 'cert_file': {'value': module.params['cert_file'], 'include': False},
+ 'key_file': {'value': module.params['key_file'], 'include': False},
+ 'image': {'value': module.params['image'], 'include': True},
+ 'latest_image': {'value': module.params['latest_image'], 'include': True},
+ 'labels': {'value': module.params['labels'], 'include': True},
+ 'ports': {'value': ','.join(module.params['ports']), 'include': True},
+ 'replicas': {'value': module.params['replicas'], 'include': True},
+ 'selector': {'value': module.params['selector'], 'include': True},
+ 'service_account': {'value': module.params['service_account'], 'include': True},
+ 'router_type': {'value': module.params['router_type'], 'include': False},
+ 'host_network': {'value': module.params['host_network'], 'include': True},
+ 'external_host': {'value': module.params['external_host'], 'include': True},
+ 'external_host_vserver': {'value': module.params['external_host_vserver'],
+ 'include': True},
+ 'external_host_insecure': {'value': module.params['external_host_insecure'],
+ 'include': True},
+ 'external_host_partition_path': {'value': module.params['external_host_partition_path'],
+ 'include': True},
+ 'external_host_username': {'value': module.params['external_host_username'],
+ 'include': True},
+ 'external_host_password': {'value': module.params['external_host_password'],
+ 'include': True},
+ 'external_host_private_key': {'value': module.params['external_host_private_key'],
+ 'include': True},
+ 'expose_metrics': {'value': module.params['expose_metrics'], 'include': True},
+ 'metrics_image': {'value': module.params['metrics_image'], 'include': True},
+ 'stats_user': {'value': module.params['stats_user'], 'include': True},
+ 'stats_password': {'value': module.params['stats_password'], 'include': True},
+ 'stats_port': {'value': module.params['stats_port'], 'include': True},
+ })
+
+
+ ocrouter = Router(rconfig)
+
+ state = module.params['state']
+
+ ########
+ # Delete
+ ########
+ if state == 'absent':
+ if not ocrouter.exists():
+ module.exit_json(changed=False, state="absent")
+
+ if module.check_mode:
+ module.exit_json(change=False, msg='Would have performed a delete.')
+
+ api_rval = ocrouter.delete()
+ module.exit_json(changed=True, results=api_rval, state="absent")
+
+
+ if state == 'present':
+ ########
+ # Create
+ ########
+ if not ocrouter.exists():
+
+ if module.check_mode:
+ module.exit_json(change=False, msg='Would have performed a create.')
+
+ api_rval = ocrouter.create()
+
+ module.exit_json(changed=True, results=api_rval, state="present")
+
+ ########
+ # Update
+ ########
+ if not ocrouter.needs_update():
+ module.exit_json(changed=False, state="present")
+
+ if module.check_mode:
+ module.exit_json(change=False, msg='Would have performed an update.')
+
+ api_rval = ocrouter.update()
+
+ if api_rval['returncode'] != 0:
+ module.fail_json(msg=api_rval)
+
+ module.exit_json(changed=True, results=api_rval, state="present")
+
+ module.exit_json(failed=True,
+ changed=False,
+ results='Unknown state passed. %s' % state,
+ state="unknown")
+
+# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import, locally-disabled
+# import module snippets.  These are required.
+from ansible.module_utils.basic import *
+main()
diff --git a/roles/lib_openshift_api/library/oc_edit.py b/roles/lib_openshift_api/library/oc_edit.py
index 44e77331d..e43b6175a 100644
--- a/roles/lib_openshift_api/library/oc_edit.py
+++ b/roles/lib_openshift_api/library/oc_edit.py
@@ -27,12 +27,12 @@ yaml.add_constructor(u'tag:yaml.org,2002:timestamp', timestamp_constructor)
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
- ''' Class to wrap the oc command line tools '''
+ ''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
- ''' Constructor for OpenshiftOC '''
+ ''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = kubeconfig
@@ -65,15 +65,15 @@ class OpenShiftCLI(object):
cmd = ['-n', self.namespace, 'replace', '-f', fname]
if force:
cmd.append('--force')
- return self.oc_cmd(cmd)
+ return self.openshift_cmd(cmd)
def _create(self, fname):
'''return all pods '''
- return self.oc_cmd(['create', '-f', fname, '-n', self.namespace])
+ return self.openshift_cmd(['create', '-f', fname, '-n', self.namespace])
def _delete(self, resource, rname):
'''return all pods '''
- return self.oc_cmd(['delete', resource, rname, '-n', self.namespace])
+ return self.openshift_cmd(['delete', resource, rname, '-n', self.namespace])
def _get(self, resource, rname=None):
'''return a secret by name '''
@@ -81,7 +81,7 @@ class OpenShiftCLI(object):
if rname:
cmd.append(rname)
- rval = self.oc_cmd(cmd, output=True)
+ rval = self.openshift_cmd(cmd, output=True)
# Ensure results are retuned in an array
if rval.has_key('items'):
@@ -91,10 +91,15 @@ class OpenShiftCLI(object):
return rval
- def oc_cmd(self, cmd, output=False):
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json'):
'''Base command for oc '''
#cmds = ['/usr/bin/oc', '--config', self.kubeconfig]
- cmds = ['/usr/bin/oc']
+ cmds = []
+ if oadm:
+ cmds = ['/usr/bin/oadm']
+ else:
+ cmds = ['/usr/bin/oc']
+
cmds.extend(cmd)
rval = {}
@@ -112,18 +117,21 @@ class OpenShiftCLI(object):
proc.wait()
stdout = proc.stdout.read()
stderr = proc.stderr.read()
-
rval = {"returncode": proc.returncode,
"results": results,
+ "cmd": ' '.join(cmds),
}
if proc.returncode == 0:
if output:
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.message:
- err = err.message
+ if output_type == 'json':
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as err:
+ if "No JSON object could be decoded" in err.message:
+ err = err.message
+ elif output_type == 'raw':
+ rval['results'] = stdout
if self.verbose:
print stdout
@@ -226,11 +234,13 @@ class Utils(object):
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements
@staticmethod
- def check_def_equal(user_def, result_def, debug=False):
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
@@ -239,11 +249,27 @@ class Utils(object):
# Both are lists
if isinstance(value, list):
if not isinstance(user_def[key], list):
+ if debug:
+ print 'user_def[key] is not a list'
return False
- # lists should be identical
- if value != user_def[key]:
- return False
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print 'sending list - list'
+ print type(values[0])
+ print type(values[1])
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+ if not result:
+ print 'list compare returned false'
+ return False
+
+ elif value != user_def[key]:
+ if debug:
+ print 'value should be identical'
+ print value
+ print user_def[key]
+ return False
# recurse on a dictionary
elif isinstance(value, dict):
@@ -262,10 +288,11 @@ class Utils(object):
print "keys are not equal in dict"
return False
- result = Utils.check_def_equal(user_def[key], value, debug=debug)
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print "dict returned false"
+ print result
return False
# Verify each key, value pair is the same
diff --git a/roles/lib_openshift_api/library/oc_obj.py b/roles/lib_openshift_api/library/oc_obj.py
index c058072e3..f0ea66aee 100644
--- a/roles/lib_openshift_api/library/oc_obj.py
+++ b/roles/lib_openshift_api/library/oc_obj.py
@@ -27,12 +27,12 @@ yaml.add_constructor(u'tag:yaml.org,2002:timestamp', timestamp_constructor)
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
- ''' Class to wrap the oc command line tools '''
+ ''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
- ''' Constructor for OpenshiftOC '''
+ ''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = kubeconfig
@@ -65,15 +65,15 @@ class OpenShiftCLI(object):
cmd = ['-n', self.namespace, 'replace', '-f', fname]
if force:
cmd.append('--force')
- return self.oc_cmd(cmd)
+ return self.openshift_cmd(cmd)
def _create(self, fname):
'''return all pods '''
- return self.oc_cmd(['create', '-f', fname, '-n', self.namespace])
+ return self.openshift_cmd(['create', '-f', fname, '-n', self.namespace])
def _delete(self, resource, rname):
'''return all pods '''
- return self.oc_cmd(['delete', resource, rname, '-n', self.namespace])
+ return self.openshift_cmd(['delete', resource, rname, '-n', self.namespace])
def _get(self, resource, rname=None):
'''return a secret by name '''
@@ -81,7 +81,7 @@ class OpenShiftCLI(object):
if rname:
cmd.append(rname)
- rval = self.oc_cmd(cmd, output=True)
+ rval = self.openshift_cmd(cmd, output=True)
# Ensure results are retuned in an array
if rval.has_key('items'):
@@ -91,10 +91,15 @@ class OpenShiftCLI(object):
return rval
- def oc_cmd(self, cmd, output=False):
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json'):
'''Base command for oc '''
#cmds = ['/usr/bin/oc', '--config', self.kubeconfig]
- cmds = ['/usr/bin/oc']
+ cmds = []
+ if oadm:
+ cmds = ['/usr/bin/oadm']
+ else:
+ cmds = ['/usr/bin/oc']
+
cmds.extend(cmd)
rval = {}
@@ -112,18 +117,21 @@ class OpenShiftCLI(object):
proc.wait()
stdout = proc.stdout.read()
stderr = proc.stderr.read()
-
rval = {"returncode": proc.returncode,
"results": results,
+ "cmd": ' '.join(cmds),
}
if proc.returncode == 0:
if output:
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.message:
- err = err.message
+ if output_type == 'json':
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as err:
+ if "No JSON object could be decoded" in err.message:
+ err = err.message
+ elif output_type == 'raw':
+ rval['results'] = stdout
if self.verbose:
print stdout
@@ -226,11 +234,13 @@ class Utils(object):
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements
@staticmethod
- def check_def_equal(user_def, result_def, debug=False):
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
@@ -239,11 +249,27 @@ class Utils(object):
# Both are lists
if isinstance(value, list):
if not isinstance(user_def[key], list):
+ if debug:
+ print 'user_def[key] is not a list'
return False
- # lists should be identical
- if value != user_def[key]:
- return False
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print 'sending list - list'
+ print type(values[0])
+ print type(values[1])
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+ if not result:
+ print 'list compare returned false'
+ return False
+
+ elif value != user_def[key]:
+ if debug:
+ print 'value should be identical'
+ print value
+ print user_def[key]
+ return False
# recurse on a dictionary
elif isinstance(value, dict):
@@ -262,10 +288,11 @@ class Utils(object):
print "keys are not equal in dict"
return False
- result = Utils.check_def_equal(user_def[key], value, debug=debug)
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print "dict returned false"
+ print result
return False
# Verify each key, value pair is the same
@@ -549,7 +576,7 @@ class OCObject(OpenShiftCLI):
data = Utils.get_resource_file(files[0], content_type)
# if equal then no need. So not equal is True
- return not Utils.check_def_equal(data, objects['results'][0], True)
+ return not Utils.check_def_equal(data, objects['results'][0], skip_keys=None, debug=False)
else:
data = content
diff --git a/roles/lib_openshift_api/library/oc_secret.py b/roles/lib_openshift_api/library/oc_secret.py
index a03022e35..ca58d7139 100644
--- a/roles/lib_openshift_api/library/oc_secret.py
+++ b/roles/lib_openshift_api/library/oc_secret.py
@@ -27,12 +27,12 @@ yaml.add_constructor(u'tag:yaml.org,2002:timestamp', timestamp_constructor)
# pylint: disable=too-few-public-methods
class OpenShiftCLI(object):
- ''' Class to wrap the oc command line tools '''
+ ''' Class to wrap the command line tools '''
def __init__(self,
namespace,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
- ''' Constructor for OpenshiftOC '''
+ ''' Constructor for OpenshiftCLI '''
self.namespace = namespace
self.verbose = verbose
self.kubeconfig = kubeconfig
@@ -65,15 +65,15 @@ class OpenShiftCLI(object):
cmd = ['-n', self.namespace, 'replace', '-f', fname]
if force:
cmd.append('--force')
- return self.oc_cmd(cmd)
+ return self.openshift_cmd(cmd)
def _create(self, fname):
'''return all pods '''
- return self.oc_cmd(['create', '-f', fname, '-n', self.namespace])
+ return self.openshift_cmd(['create', '-f', fname, '-n', self.namespace])
def _delete(self, resource, rname):
'''return all pods '''
- return self.oc_cmd(['delete', resource, rname, '-n', self.namespace])
+ return self.openshift_cmd(['delete', resource, rname, '-n', self.namespace])
def _get(self, resource, rname=None):
'''return a secret by name '''
@@ -81,7 +81,7 @@ class OpenShiftCLI(object):
if rname:
cmd.append(rname)
- rval = self.oc_cmd(cmd, output=True)
+ rval = self.openshift_cmd(cmd, output=True)
# Ensure results are retuned in an array
if rval.has_key('items'):
@@ -91,10 +91,15 @@ class OpenShiftCLI(object):
return rval
- def oc_cmd(self, cmd, output=False):
+ def openshift_cmd(self, cmd, oadm=False, output=False, output_type='json'):
'''Base command for oc '''
#cmds = ['/usr/bin/oc', '--config', self.kubeconfig]
- cmds = ['/usr/bin/oc']
+ cmds = []
+ if oadm:
+ cmds = ['/usr/bin/oadm']
+ else:
+ cmds = ['/usr/bin/oc']
+
cmds.extend(cmd)
rval = {}
@@ -112,18 +117,21 @@ class OpenShiftCLI(object):
proc.wait()
stdout = proc.stdout.read()
stderr = proc.stderr.read()
-
rval = {"returncode": proc.returncode,
"results": results,
+ "cmd": ' '.join(cmds),
}
if proc.returncode == 0:
if output:
- try:
- rval['results'] = json.loads(stdout)
- except ValueError as err:
- if "No JSON object could be decoded" in err.message:
- err = err.message
+ if output_type == 'json':
+ try:
+ rval['results'] = json.loads(stdout)
+ except ValueError as err:
+ if "No JSON object could be decoded" in err.message:
+ err = err.message
+ elif output_type == 'raw':
+ rval['results'] = stdout
if self.verbose:
print stdout
@@ -226,11 +234,13 @@ class Utils(object):
# Disabling too-many-branches. This is a yaml dictionary comparison function
# pylint: disable=too-many-branches,too-many-return-statements
@staticmethod
- def check_def_equal(user_def, result_def, debug=False):
+ def check_def_equal(user_def, result_def, skip_keys=None, debug=False):
''' Given a user defined definition, compare it with the results given back by our query. '''
# Currently these values are autogenerated and we do not need to check them
skip = ['metadata', 'status']
+ if skip_keys:
+ skip.extend(skip_keys)
for key, value in result_def.items():
if key in skip:
@@ -239,11 +249,27 @@ class Utils(object):
# Both are lists
if isinstance(value, list):
if not isinstance(user_def[key], list):
+ if debug:
+ print 'user_def[key] is not a list'
return False
- # lists should be identical
- if value != user_def[key]:
- return False
+ for values in zip(user_def[key], value):
+ if isinstance(values[0], dict) and isinstance(values[1], dict):
+ if debug:
+ print 'sending list - list'
+ print type(values[0])
+ print type(values[1])
+ result = Utils.check_def_equal(values[0], values[1], skip_keys=skip_keys, debug=debug)
+ if not result:
+ print 'list compare returned false'
+ return False
+
+ elif value != user_def[key]:
+ if debug:
+ print 'value should be identical'
+ print value
+ print user_def[key]
+ return False
# recurse on a dictionary
elif isinstance(value, dict):
@@ -262,10 +288,11 @@ class Utils(object):
print "keys are not equal in dict"
return False
- result = Utils.check_def_equal(user_def[key], value, debug=debug)
+ result = Utils.check_def_equal(user_def[key], value, skip_keys=skip_keys, debug=debug)
if not result:
if debug:
print "dict returned false"
+ print result
return False
# Verify each key, value pair is the same
@@ -519,7 +546,7 @@ class Secret(OpenShiftCLI):
cmd = ['-n%s' % self.namespace, 'secrets', 'new', self.name]
cmd.extend(secrets)
- return self.oc_cmd(cmd)
+ return self.openshift_cmd(cmd)
def update(self, files, force=False):
'''run update secret
@@ -550,7 +577,7 @@ class Secret(OpenShiftCLI):
cmd = ['-ojson', '-n%s' % self.namespace, 'secrets', 'new', self.name]
cmd.extend(secrets)
- return self.oc_cmd(cmd, output=True)
+ return self.openshift_cmd(cmd, output=True)
diff --git a/roles/lib_zabbix/library/zbx_user.py b/roles/lib_zabbix/library/zbx_user.py
index d10ffb9ff..68c5cfbfe 100644
--- a/roles/lib_zabbix/library/zbx_user.py
+++ b/roles/lib_zabbix/library/zbx_user.py
@@ -97,6 +97,7 @@ def main():
last_name=dict(default=None, type='str'),
user_type=dict(default=None, type='str'),
password=dict(default=None, type='str'),
+ refresh=dict(default=None, type='int'),
update_password=dict(default=False, type='bool'),
user_groups=dict(default=[], type='list'),
state=dict(default='present', type='str'),
@@ -137,6 +138,7 @@ def main():
'usrgrps': get_usergroups(zapi, module.params['user_groups']),
'name': module.params['first_name'],
'surname': module.params['last_name'],
+ 'refresh': module.params['refresh'],
'type': get_usertype(module.params['user_type']),
}
diff --git a/roles/openshift_docker_facts/tasks/main.yml b/roles/openshift_docker_facts/tasks/main.yml
index 1848619e0..eb762e33f 100644
--- a/roles/openshift_docker_facts/tasks/main.yml
+++ b/roles/openshift_docker_facts/tasks/main.yml
@@ -52,4 +52,4 @@
set_fact:
docker_version: "{{ '1.8.2' }}"
when: " ( common_version.stdout | default('0.0', True) | version_compare('3.2','<') and openshift.common.service_type == 'atomic-openshift' ) or
- ( common_version.stdout | default('0.0', True) | version_compare('1.2','<') and openshift.common.service_type == 'origin' )"
+ ( common_version.stdout | default('0.0', True) | version_compare('1.1.4','<') and openshift.common.service_type == 'origin' )"
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 0d31d4ddf..32e608e86 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -837,6 +837,25 @@ def set_sdn_facts_if_unset(facts, system_facts):
return facts
+def migrate_oauth_template_facts(facts):
+ """
+ Migrate an old oauth template fact to a newer format if it's present.
+
+ The legacy 'oauth_template' fact was just a filename, and assumed you were
+ setting the 'login' template.
+
+ The new pluralized 'oauth_templates' fact is a dict mapping the template
+ name to a filename.
+
+ Simplify the code after this by merging the old fact into the new.
+ """
+ if 'master' in facts and 'oauth_template' in facts['master']:
+ if 'oauth_templates' not in facts['master']:
+ facts['master']['oauth_templates'] = {"login": facts['master']['oauth_template']}
+ elif 'login' not in facts['master']['oauth_templates']:
+ facts['master']['oauth_templates']['login'] = facts['master']['oauth_template']
+ return facts
+
def format_url(use_ssl, hostname, port, path=''):
""" Format url based on ssl flag, hostname, port and path
@@ -924,12 +943,13 @@ def build_kubelet_args(facts):
if 'node' in facts:
kubelet_args = {}
if 'cloudprovider' in facts:
- if facts['cloudprovider']['kind'] == 'aws':
- kubelet_args['cloud-provider'] = ['aws']
- kubelet_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
- if facts['cloudprovider']['kind'] == 'openstack':
- kubelet_args['cloud-provider'] = ['openstack']
- kubelet_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
+ if 'kind' in facts['cloudprovider']:
+ if facts['cloudprovider']['kind'] == 'aws':
+ kubelet_args['cloud-provider'] = ['aws']
+ kubelet_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
+ if facts['cloudprovider']['kind'] == 'openstack':
+ kubelet_args['cloud-provider'] = ['openstack']
+ kubelet_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
if kubelet_args != {}:
facts = merge_facts({'node': {'kubelet_args': kubelet_args}}, facts, [], [])
return facts
@@ -941,12 +961,13 @@ def build_controller_args(facts):
if 'master' in facts:
controller_args = {}
if 'cloudprovider' in facts:
- if facts['cloudprovider']['kind'] == 'aws':
- controller_args['cloud-provider'] = ['aws']
- controller_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
- if facts['cloudprovider']['kind'] == 'openstack':
- controller_args['cloud-provider'] = ['openstack']
- controller_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
+ if 'kind' in facts['cloudprovider']:
+ if facts['cloudprovider']['kind'] == 'aws':
+ controller_args['cloud-provider'] = ['aws']
+ controller_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
+ if facts['cloudprovider']['kind'] == 'openstack':
+ controller_args['cloud-provider'] = ['openstack']
+ controller_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
if controller_args != {}:
facts = merge_facts({'master': {'controller_args': controller_args}}, facts, [], [])
return facts
@@ -958,12 +979,13 @@ def build_api_server_args(facts):
if 'master' in facts:
api_server_args = {}
if 'cloudprovider' in facts:
- if facts['cloudprovider']['kind'] == 'aws':
- api_server_args['cloud-provider'] = ['aws']
- api_server_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
- if facts['cloudprovider']['kind'] == 'openstack':
- api_server_args['cloud-provider'] = ['openstack']
- api_server_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
+ if 'kind' in facts['cloudprovider']:
+ if facts['cloudprovider']['kind'] == 'aws':
+ api_server_args['cloud-provider'] = ['aws']
+ api_server_args['cloud-config'] = [cloud_cfg_path + '/aws.conf']
+ if facts['cloudprovider']['kind'] == 'openstack':
+ api_server_args['cloud-provider'] = ['openstack']
+ api_server_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf']
if api_server_args != {}:
facts = merge_facts({'master': {'api_server_args': api_server_args}}, facts, [], [])
return facts
@@ -1118,12 +1140,21 @@ def merge_facts(orig, new, additive_facts_to_overwrite, protected_facts_to_overw
"""
additive_facts = ['named_certificates']
protected_facts = ['ha', 'master_count']
+
+ # Facts we do not ever want to merge. These originate in inventory variables
+ # and typically contain JSON dicts. We don't ever want to trigger a merge
+ # here, just completely overwrite with the new if they are present there.
+ overwrite_facts = ['admission_plugin_config',
+ 'kube_admission_plugin_config']
+
facts = dict()
for key, value in orig.iteritems():
# Key exists in both old and new facts.
if key in new:
+ if key in overwrite_facts:
+ facts[key] = copy.deepcopy(new[key])
# Continue to recurse if old and new fact is a dictionary.
- if isinstance(value, dict) and isinstance(new[key], dict):
+ elif isinstance(value, dict) and isinstance(new[key], dict):
# Collect the subset of additive facts to overwrite if
# key matches. These will be passed to the subsequent
# merge_facts call.
@@ -1441,6 +1472,7 @@ class OpenShiftFacts(object):
local_facts,
additive_facts_to_overwrite,
protected_facts_to_overwrite)
+ facts = migrate_oauth_template_facts(facts)
facts['current_config'] = get_current_config(facts)
facts = set_url_facts_if_unset(facts)
facts = set_project_cfg_facts_if_unset(facts)
diff --git a/roles/openshift_master/templates/atomic-openshift-master.j2 b/roles/openshift_master/templates/atomic-openshift-master.j2
index 7f1576682..862cfa8f1 100644
--- a/roles/openshift_master/templates/atomic-openshift-master.j2
+++ b/roles/openshift_master/templates/atomic-openshift-master.j2
@@ -4,7 +4,7 @@ CONFIG_FILE={{ openshift_master_config_file }}
IMAGE_VERSION={{ openshift_version }}
{% endif %}
-{% if 'cloudprovider' in openshift and 'aws' in openshift.cloudprovider and openshift.cloudprovider.kind == 'aws' and 'access_key' in openshift.cloudprovider.aws and 'secret_key' in openshift.cloudprovider.aws %}
+{% if 'cloudprovider' in openshift and 'aws' in openshift.cloudprovider and 'kind' in openshift.cloudprovider and openshift.cloudprovider.kind == 'aws' and 'access_key' in openshift.cloudprovider.aws and 'secret_key' in openshift.cloudprovider.aws %}
AWS_ACCESS_KEY_ID={{ openshift.cloudprovider.aws.access_key }}
AWS_SECRET_ACCESS_KEY={{ openshift.cloudprovider.aws.secret_key }}
{% endif %}
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
index 5e6577d95..66d76978e 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2
@@ -12,7 +12,7 @@ Requires=docker.service
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-api
Environment=GOTRACEBACK=crash
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-api
-ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-api --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api --config=${CONFIG_FILE} $OPTIONS
+ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-api --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-api -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if 'cloudprovider' in openshift and 'kind' in openshift.cloudprovider and openshift.cloudprovider.kind != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api --config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-api
LimitNOFILE=131072
diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
index 04c84a84a..75759c133 100644
--- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
+++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2
@@ -11,7 +11,7 @@ PartOf=docker.service
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers
Environment=GOTRACEBACK=crash
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type}}-master-controllers
-ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-controllers --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers --config=${CONFIG_FILE} $OPTIONS
+ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master-controllers --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master-controllers -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if 'cloudprovider' in openshift and 'kind' in openshift.cloudprovider and openshift.cloudprovider.kind != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master controllers --config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master-controllers
LimitNOFILE=131072
diff --git a/roles/openshift_master/templates/docker/master.docker.service.j2 b/roles/openshift_master/templates/docker/master.docker.service.j2
index 6bd0dcf56..d02fc5342 100644
--- a/roles/openshift_master/templates/docker/master.docker.service.j2
+++ b/roles/openshift_master/templates/docker/master.docker.service.j2
@@ -8,7 +8,7 @@ Wants=etcd_container.service
[Service]
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-master
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-master
-ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master --config=${CONFIG_FILE} $OPTIONS
+ExecStart=/usr/bin/docker run --rm --privileged --net=host --name {{ openshift.common.service_type }}-master --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-master -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v /var/run/docker.sock:/var/run/docker.sock -v {{ openshift.common.config_base }}:{{ openshift.common.config_base }} {% if 'cloudprovider' in openshift and 'kind' in openshift.cloudprovider and openshift.cloudprovider.kind != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} {{ openshift.master.master_image }}:${IMAGE_VERSION} start master --config=${CONFIG_FILE} $OPTIONS
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-master
Restart=always
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index 813a58d60..e89fdc0ce 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -1,3 +1,10 @@
+admissionConfig:
+{% if 'admission_plugin_order' in openshift.master %}
+ pluginOrderOverride:{{ openshift.master.admission_plugin_order | to_padded_yaml(level=2) }}
+{% endif %}
+{% if 'admission_plugin_config' in openshift.master %}
+ pluginConfig:{{ openshift.master.admission_plugin_config | to_padded_yaml(level=2) }}
+{% endif %}
apiLevels:
{% if not openshift.common.version_gte_3_1_or_1_1 | bool %}
- v1beta3
@@ -96,6 +103,13 @@ kubernetesMasterConfig:
- v1beta3
- v1
{% endif %}
+ admissionConfig:
+{% if 'kube_admission_plugin_order' in openshift.master %}
+ pluginOrderOverride:{{ openshift.master.kube_admission_plugin_order | to_padded_yaml(level=3) }}
+{% endif %}
+{% if 'kube_admission_plugin_config' in openshift.master %}
+ pluginConfig:{{ openshift.master.kube_admission_plugin_config | to_padded_yaml(level=3) }}
+{% endif %}
apiServerArguments: {{ openshift.master.api_server_args | default(None) | to_padded_yaml( level=2 ) }}
controllerArguments: {{ openshift.master.controller_args | default(None) | to_padded_yaml( level=2 ) }}
masterCount: {{ openshift.master.master_count if openshift.master.cluster_method | default(None) == 'native' else 1 }}
@@ -123,9 +137,11 @@ networkConfig:
# serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet
serviceNetworkCIDR: {{ openshift.master.portal_net }}
oauthConfig:
-{% if 'oauth_template' in openshift.master %}
- templates:
- login: {{ openshift.master.oauth_template }}
+{% if 'oauth_always_show_provider_selection' in openshift.master %}
+ alwaysShowProviderSelection: {{ openshift.master.oauth_always_show_provider_selection }}
+{% endif %}
+{% if 'oauth_templates' in openshift.master %}
+ templates:{{ openshift.master.oauth_templates | to_padded_yaml(level=2) }}
{% endif %}
assetPublicURL: {{ openshift.master.public_console_url }}/
grantConfig:
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
index fa2323a2c..69754ee10 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
@@ -4,7 +4,7 @@ CONFIG_FILE={{ openshift_master_config_file }}
IMAGE_VERSION={{ openshift_version }}
{% endif %}
-{% if 'cloudprovider' in openshift and 'aws' in openshift.cloudprovider and openshift.cloudprovider.kind == 'aws' and 'access_key' in openshift.cloudprovider.aws and 'secret_key' in openshift.cloudprovider.aws %}
+{% if 'cloudprovider' in openshift and 'aws' in openshift.cloudprovider and 'kind' in openshift.cloudprovider and openshift.cloudprovider.kind == 'aws' and 'access_key' in openshift.cloudprovider.aws and 'secret_key' in openshift.cloudprovider.aws %}
AWS_ACCESS_KEY_ID={{ openshift.cloudprovider.aws.access_key }}
AWS_SECRET_ACCESS_KEY={{ openshift.cloudprovider.aws.secret_key }}
{% endif %}
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
index 632dfbb8a..048a4305a 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
@@ -4,7 +4,7 @@ CONFIG_FILE={{ openshift_master_config_file }}
IMAGE_VERSION={{ openshift_version }}
{% endif %}
-{% if 'cloudprovider' in openshift and 'aws' in openshift.cloudprovider and openshift.cloudprovider.kind == 'aws' and 'access_key' in openshift.cloudprovider.aws and 'secret_key' in openshift.cloudprovider.aws %}
+{% if 'cloudprovider' in openshift and 'aws' in openshift.cloudprovider and 'kind' in openshift.cloudprovider and openshift.cloudprovider.kind == 'aws' and 'access_key' in openshift.cloudprovider.aws and 'secret_key' in openshift.cloudprovider.aws %}
AWS_ACCESS_KEY_ID={{ openshift.cloudprovider.aws.access_key }}
AWS_SECRET_ACCESS_KEY={{ openshift.cloudprovider.aws.secret_key }}
{% endif %}
diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml
index 2a3e38af4..f43b8c59d 100644
--- a/roles/openshift_master_facts/tasks/main.yml
+++ b/roles/openshift_master_facts/tasks/main.yml
@@ -65,3 +65,10 @@
master_image: "{{ osm_image | default(None) }}"
scheduler_predicates: "{{ openshift_master_scheduler_predicates | default(None) }}"
scheduler_priorities: "{{ openshift_master_scheduler_priorities | default(None) }}"
+ admission_plugin_order: "{{openshift_master_admission_plugin_order | default(None) }}"
+ admission_plugin_config: "{{openshift_master_admission_plugin_config | default(None) }}"
+ kube_admission_plugin_order: "{{openshift_master_kube_admission_plugin_order | default(None) }}"
+ kube_admission_plugin_config: "{{openshift_master_kube_admission_plugin_config | default(None) }}"
+ oauth_template: "{{ openshift_master_oauth_template | default(None) }}" # deprecated in origin 1.2 / OSE 3.2
+ oauth_templates: "{{ openshift_master_oauth_templates | default(None) }}"
+ oauth_always_show_provider_selection: "{{ openshift_master_oauth_always_show_provider_selection | default(None) }}"
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 80b3e710d..eca4848c1 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -92,7 +92,7 @@
line: "AWS_ACCESS_KEY_ID={{ openshift.cloudprovider.aws.access_key }}"
- regex: '^AWS_SECRET_ACCESS_KEY='
line: "AWS_SECRET_ACCESS_KEY={{ openshift.cloudprovider.aws.secret_key }}"
- when: "'cloudprovider' in openshift and 'aws' in openshift.cloudprovider and openshift.cloudprovider.kind == 'aws' and 'access_key' in openshift.cloudprovider.aws and 'secret_key' in openshift.cloudprovider.aws"
+ when: "'cloudprovider' in openshift and 'aws' in openshift.cloudprovider and 'kind' in openshift.cloudprovider and openshift.cloudprovider.kind == 'aws' and 'access_key' in openshift.cloudprovider.aws and 'secret_key' in openshift.cloudprovider.aws"
notify:
- restart node
diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index a8accca47..ff5a97fe0 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -12,7 +12,7 @@ Wants={{ openshift.common.service_type }}-master.service
[Service]
EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node
ExecStartPre=-/usr/bin/docker rm -f {{ openshift.common.service_type }}-node
-ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:ro -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev {{ openshift.node.node_image }}:${IMAGE_VERSION}
+ExecStart=/usr/bin/docker run --name {{ openshift.common.service_type }}-node --rm --privileged --net=host --pid=host --env-file=/etc/sysconfig/{{ openshift.common.service_type }}-node -v /:/rootfs:ro -e CONFIG_FILE=${CONFIG_FILE} -e OPTIONS=${OPTIONS} -e HOST=/rootfs -e HOST_ETC=/host-etc -v {{ openshift.common.data_dir }}:{{ openshift.common.data_dir }} -v {{ openshift.common.config_base }}/node:{{ openshift.common.config_base }}/node {% if 'cloudprovider' in openshift and 'kind' in openshift.cloudprovider and openshift.cloudprovider.kind != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} -v /etc/localtime:/etc/localtime:ro -v /etc/machine-id:/etc/machine-id:ro -v /run:/run -v /sys:/sys:ro -v /usr/bin/docker:/usr/bin/docker:ro -v /var/lib/docker:/var/lib/docker -v /lib/modules:/lib/modules -v /etc/origin/openvswitch:/etc/openvswitch -v /etc/origin/sdn:/etc/openshift-sdn -v /etc/systemd/system:/host-etc/systemd/system -v /var/log:/var/log -v /dev:/dev {{ openshift.node.node_image }}:${IMAGE_VERSION}
ExecStartPost=/usr/bin/sleep 10
ExecStop=/usr/bin/docker stop {{ openshift.common.service_type }}-node
SyslogIdentifier={{ openshift.common.service_type }}-node
diff --git a/roles/openshift_storage_nfs/README.md b/roles/openshift_storage_nfs/README.md
index dd988b849..dec5bf131 100644
--- a/roles/openshift_storage_nfs/README.md
+++ b/roles/openshift_storage_nfs/README.md
@@ -21,23 +21,22 @@ From this role:
| openshift_hosted_registry_storage_volume_name | registry | Registry volume within openshift_hosted_registry_volume_dir |
| openshift_hosted_registry_storage_nfs_options | *(rw,root_squash) | NFS options for configured exports. |
-
-From openshift_common:
-| Name | Default Value | |
-|-------------------------------|----------------|----------------------------------------|
-| openshift_debug_level | 2 | Global openshift debug log verbosity |
-
-
Dependencies
------------
+* os_firewall
+* openshift_facts
+* openshift_repos
+
Example Playbook
----------------
+```
- name: Configure nfs hosts
hosts: oo_nfs_to_config
roles:
- role: openshift_storage_nfs
+```
License
-------
diff --git a/roles/openshift_storage_nfs/meta/main.yml b/roles/openshift_storage_nfs/meta/main.yml
index 2975daf52..c6815d697 100644
--- a/roles/openshift_storage_nfs/meta/main.yml
+++ b/roles/openshift_storage_nfs/meta/main.yml
@@ -11,5 +11,5 @@ galaxy_info:
- 7
dependencies:
- { role: os_firewall }
-- { role: openshift_common }
+- { role: openshift_facts }
- { role: openshift_repos }