From 6beef83cd9c2d76c0b4bdff1208d64a41d17818e Mon Sep 17 00:00:00 2001
From: Kenny Woodson
Date: Mon, 23 Jan 2017 17:09:19 -0500
Subject: Adding integration test for oc_scale.

---
 roles/lib_openshift/src/ansible/oc_scale.py         |   2 +-
 roles/lib_openshift/src/class/oc_scale.py           |   1 +
 roles/lib_openshift/src/lib/base.py                 |   5 +-
 roles/lib_openshift/src/lib/deploymentconfig.py     | 337 +++++++++++++++++++++
 .../lib_openshift/src/lib/replicationcontroller.py  |  14 +
 roles/lib_openshift/src/sources.yml                 |   2 +
 .../src/test/integration/oc_scale.yml               |  79 ++++-
 7 files changed, 433 insertions(+), 7 deletions(-)
 create mode 100644 roles/lib_openshift/src/lib/deploymentconfig.py
 create mode 100644 roles/lib_openshift/src/lib/replicationcontroller.py

diff --git a/roles/lib_openshift/src/ansible/oc_scale.py b/roles/lib_openshift/src/ansible/oc_scale.py
index 58f5c2ee3..57f9902d6 100644
--- a/roles/lib_openshift/src/ansible/oc_scale.py
+++ b/roles/lib_openshift/src/ansible/oc_scale.py
@@ -18,7 +18,7 @@ def main():
         ),
         supports_check_mode=True,
     )
-    rval = OCScale.run_ansible(params, module.check_mode)
+    rval = OCScale.run_ansible(module.params, module.check_mode)
 
     if 'failed' in rval:
         module.fail_json(**rval)
diff --git a/roles/lib_openshift/src/class/oc_scale.py b/roles/lib_openshift/src/class/oc_scale.py
index bd8f9da93..68dc6ffd4 100644
--- a/roles/lib_openshift/src/class/oc_scale.py
+++ b/roles/lib_openshift/src/class/oc_scale.py
@@ -58,6 +58,7 @@ class OCScale(OpenShiftCLI):
         ''' verify whether an update is needed '''
         return self.resource.needs_update_replicas(self.replicas)
 
+    # pylint: disable=too-many-return-statements
     @staticmethod
     def run_ansible(params, check_mode):
         '''perform the idempotent ansible logic'''
diff --git a/roles/lib_openshift/src/lib/base.py b/roles/lib_openshift/src/lib/base.py
index db5f4e890..8b5491d6b 100644
--- a/roles/lib_openshift/src/lib/base.py
+++ b/roles/lib_openshift/src/lib/base.py
@@ -111,12 +111,11 @@ class OpenShiftCLI(object):
         cmd = ['get', resource]
         if selector:
             cmd.append('--selector=%s' % selector)
+        elif rname:
+            cmd.append(rname)
 
         cmd.extend(['-o', 'json'])
 
-        if rname:
-            cmd.append(rname)
-
         rval = self.openshift_cmd(cmd, output=True)
 
         # Ensure results are retuned in an array
diff --git a/roles/lib_openshift/src/lib/deploymentconfig.py b/roles/lib_openshift/src/lib/deploymentconfig.py
new file mode 100644
index 000000000..c0e9af0a1
--- /dev/null
+++ b/roles/lib_openshift/src/lib/deploymentconfig.py
@@ -0,0 +1,337 @@
+# pylint: skip-file
+
+# pylint: disable=too-many-public-methods
+class DeploymentConfig(Yedit):
+    ''' Class to wrap the oc command line tools '''
+    default_deployment_config = '''
+apiVersion: v1
+kind: DeploymentConfig
+metadata:
+  name: default_dc
+  namespace: default
+spec:
+  replicas: 0
+  selector:
+    default_dc: default_dc
+  strategy:
+    resources: {}
+    rollingParams:
+      intervalSeconds: 1
+      maxSurge: 0
+      maxUnavailable: 25%
+      timeoutSeconds: 600
+      updatePercent: -25
+      updatePeriodSeconds: 1
+    type: Rolling
+  template:
+    metadata:
+    spec:
+      containers:
+      - env:
+        - name: default
+          value: default
+        image: default
+        imagePullPolicy: IfNotPresent
+        name: default_dc
+        ports:
+        - containerPort: 8000
+          hostPort: 8000
+          protocol: TCP
+          name: default_port
+        resources: {}
+        terminationMessagePath: /dev/termination-log
+      dnsPolicy: ClusterFirst
+      hostNetwork: true
+      nodeSelector:
+        type: compute
+      restartPolicy: Always
+      securityContext: {}
+      serviceAccount: default
+      serviceAccountName: default
+      terminationGracePeriodSeconds: 30
+  triggers:
+  - type: ConfigChange
+'''
+
+    replicas_path = "spec.replicas"
+    env_path = "spec.template.spec.containers[0].env"
+    volumes_path = "spec.template.spec.volumes"
+    container_path = "spec.template.spec.containers"
+    volume_mounts_path = "spec.template.spec.containers[0].volumeMounts"
+
+    def __init__(self, content=None):
+        ''' Constructor for OpenshiftOC '''
+        if not content:
+            content = DeploymentConfig.default_deployment_config
+
+        super(DeploymentConfig, self).__init__(content=content)
+
+    # pylint: disable=no-member
+    def add_env_value(self, key, value):
+        ''' add key, value pair to env array '''
+        rval = False
+        env = self.get_env_vars()
+        if env:
+            env.append({'name': key, 'value': value})
+            rval = True
+        else:
+            result = self.put(DeploymentConfig.env_path, {'name': key, 'value': value})
+            rval = result[0]
+
+        return rval
+
+    def exists_env_value(self, key, value):
+        ''' return whether a key, value pair exists '''
+        results = self.get_env_vars()
+        if not results:
+            return False
+
+        for result in results:
+            if result['name'] == key and result['value'] == value:
+                return True
+
+        return False
+
+    def exists_env_key(self, key):
+        ''' return whether a key, value pair exists '''
+        results = self.get_env_vars()
+        if not results:
+            return False
+
+        for result in results:
+            if result['name'] == key:
+                return True
+
+        return False
+
+    def get_env_vars(self):
+        '''return a environment variables '''
+        return self.get(DeploymentConfig.env_path) or []
+
+    def delete_env_var(self, keys):
+        '''delete a list of keys '''
+        if not isinstance(keys, list):
+            keys = [keys]
+
+        env_vars_array = self.get_env_vars()
+        modified = False
+        idx = None
+        for key in keys:
+            for env_idx, env_var in enumerate(env_vars_array):
+                if env_var['name'] == key:
+                    idx = env_idx
+                    break
+
+            if idx:
+                modified = True
+                del env_vars_array[idx]
+
+        if modified:
+            return True
+
+        return False
+
+    def update_env_var(self, key, value):
+        '''place an env in the env var list'''
+
+        env_vars_array = self.get_env_vars()
+        idx = None
+        for env_idx, env_var in enumerate(env_vars_array):
+            if env_var['name'] == key:
+                idx = env_idx
+                break
+
+        if idx:
+            env_vars_array[idx]['value'] = value
+        else:
+            self.add_env_value(key, value)
+
+        return True
+
+    def exists_volume_mount(self, volume_mount):
+        ''' return whether a volume mount exists '''
+        exist_volume_mounts = self.get_volume_mounts()
+
+        if not exist_volume_mounts:
+            return False
+
+        volume_mount_found = False
+        for exist_volume_mount in exist_volume_mounts:
+            if exist_volume_mount['name'] == volume_mount['name']:
+                volume_mount_found = True
+                break
+
+        return volume_mount_found
+
+    def exists_volume(self, volume):
+        ''' return whether a volume exists '''
+        exist_volumes = self.get_volumes()
+
+        volume_found = False
+        for exist_volume in exist_volumes:
+            if exist_volume['name'] == volume['name']:
+                volume_found = True
+                break
+
+        return volume_found
+
+    def find_volume_by_name(self, volume, mounts=False):
+        ''' return the index of a volume '''
+        volumes = []
+        if mounts:
+            volumes = self.get_volume_mounts()
+        else:
+            volumes = self.get_volumes()
+        for exist_volume in volumes:
+            if exist_volume['name'] == volume['name']:
+                return exist_volume
+
+        return None
+
+    def get_replicas(self):
+        ''' return replicas setting '''
+        return self.get(DeploymentConfig.replicas_path)
+
+    def get_volume_mounts(self):
+        '''return volume mount information '''
+        return self.get_volumes(mounts=True)
+
+    def get_volumes(self, mounts=False):
+        '''return volume mount information '''
+        if mounts:
+            return self.get(DeploymentConfig.volume_mounts_path) or []
+
+        return self.get(DeploymentConfig.volumes_path) or []
+
+    def delete_volume_by_name(self, volume):
+        '''delete a volume '''
+        modified = False
+        exist_volume_mounts = self.get_volume_mounts()
+        exist_volumes = self.get_volumes()
+        del_idx = None
+        for idx, exist_volume in enumerate(exist_volumes):
+            if exist_volume.has_key('name') and exist_volume['name'] == volume['name']:
+                del_idx = idx
+                break
+
+        if del_idx != None:
+            del exist_volumes[del_idx]
+            modified = True
+
+        del_idx = None
+        for idx, exist_volume_mount in enumerate(exist_volume_mounts):
+            if exist_volume_mount.has_key('name') and exist_volume_mount['name'] == volume['name']:
+                del_idx = idx
+                break
+
+        if del_idx != None:
+            del exist_volume_mounts[idx]
+            modified = True
+
+        return modified
+
+    def add_volume_mount(self, volume_mount):
+        ''' add a volume or volume mount to the proper location '''
+        exist_volume_mounts = self.get_volume_mounts()
+
+        if not exist_volume_mounts and volume_mount:
+            self.put(DeploymentConfig.volume_mounts_path, [volume_mount])
+        else:
+            exist_volume_mounts.append(volume_mount)
+
+    def add_volume(self, volume):
+        ''' add a volume or volume mount to the proper location '''
+        exist_volumes = self.get_volumes()
+        if not volume:
+            return
+
+        if not exist_volumes:
+            self.put(DeploymentConfig.volumes_path, [volume])
+        else:
+            exist_volumes.append(volume)
+
+    def update_replicas(self, replicas):
+        ''' update replicas value '''
+        self.put(DeploymentConfig.replicas_path, replicas)
+
+    def update_volume(self, volume):
+        '''place an env in the env var list'''
+        exist_volumes = self.get_volumes()
+
+        if not volume:
+            return False
+
+        # update the volume
+        update_idx = None
+        for idx, exist_vol in enumerate(exist_volumes):
+            if exist_vol['name'] == volume['name']:
+                update_idx = idx
+                break
+
+        if update_idx != None:
+            exist_volumes[update_idx] = volume
+        else:
+            self.add_volume(volume)
+
+        return True
+
+    def update_volume_mount(self, volume_mount):
+        '''place an env in the env var list'''
+        modified = False
+
+        exist_volume_mounts = self.get_volume_mounts()
+
+        if not volume_mount:
+            return False
+
+        # update the volume mount
+        for exist_vol_mount in exist_volume_mounts:
+            if exist_vol_mount['name'] == volume_mount['name']:
+                if exist_vol_mount.has_key('mountPath') and \
+                   str(exist_vol_mount['mountPath']) != str(volume_mount['mountPath']):
+                    exist_vol_mount['mountPath'] = volume_mount['mountPath']
+                    modified = True
+                break
+
+        if not modified:
+            self.add_volume_mount(volume_mount)
+            modified = True
+
+        return modified
+
+    def needs_update_volume(self, volume, volume_mount):
+        ''' verify a volume update is needed '''
+        exist_volume = self.find_volume_by_name(volume)
+        exist_volume_mount = self.find_volume_by_name(volume, mounts=True)
+        results = []
+        results.append(exist_volume['name'] == volume['name'])
+
+        if volume.has_key('secret'):
+            results.append(exist_volume.has_key('secret'))
+            results.append(exist_volume['secret']['secretName'] == volume['secret']['secretName'])
+            results.append(exist_volume_mount['name'] == volume_mount['name'])
+            results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath'])
+
+        elif volume.has_key('emptyDir'):
+            results.append(exist_volume_mount['name'] == volume['name'])
+            results.append(exist_volume_mount['mountPath'] == volume_mount['mountPath'])
+
+        elif volume.has_key('persistentVolumeClaim'):
+            pvc = 'persistentVolumeClaim'
+            results.append(exist_volume.has_key(pvc))
+            if results[-1]:
+                results.append(exist_volume[pvc]['claimName'] == volume[pvc]['claimName'])
+
+                if volume[pvc].has_key('claimSize'):
+                    results.append(exist_volume[pvc]['claimSize'] == volume[pvc]['claimSize'])
+
+        elif volume.has_key('hostpath'):
+            results.append(exist_volume.has_key('hostPath'))
+            results.append(exist_volume['hostPath']['path'] == volume_mount['mountPath'])
+
+        return not all(results)
+
+    def needs_update_replicas(self, replicas):
+        ''' verify whether a replica update is needed '''
+        current_reps = self.get(DeploymentConfig.replicas_path)
+        return not current_reps == replicas
diff --git a/roles/lib_openshift/src/lib/replicationcontroller.py b/roles/lib_openshift/src/lib/replicationcontroller.py
new file mode 100644
index 000000000..7dafc60f1
--- /dev/null
+++ b/roles/lib_openshift/src/lib/replicationcontroller.py
@@ -0,0 +1,14 @@
+# pylint: skip-file
+
+# pylint: disable=too-many-public-methods
+class ReplicationController(DeploymentConfig):
+    ''' Class to wrap the oc command line tools '''
+    replicas_path = "spec.replicas"
+    env_path = "spec.template.spec.containers[0].env"
+    volumes_path = "spec.template.spec.volumes"
+    container_path = "spec.template.spec.containers"
+    volume_mounts_path = "spec.template.spec.containers[0].volumeMounts"
+
+    def __init__(self, content):
+        ''' Constructor for OpenshiftOC '''
+        super(ReplicationController, self).__init__(content=content)
diff --git a/roles/lib_openshift/src/sources.yml b/roles/lib_openshift/src/sources.yml
index 00543c64b..4623407bc 100644
--- a/roles/lib_openshift/src/sources.yml
+++ b/roles/lib_openshift/src/sources.yml
@@ -34,6 +34,8 @@ oc_scale.py:
 - doc/scale
 - ../../lib_utils/src/class/yedit.py
 - lib/base.py
+- lib/deploymentconfig.py
+- lib/replicationcontroller.py
 - class/oc_scale.py
 - ansible/oc_scale.py
 oc_version.py:
diff --git a/roles/lib_openshift/src/test/integration/oc_scale.yml b/roles/lib_openshift/src/test/integration/oc_scale.yml
index d272357e8..ccc3d05de 100755
--- a/roles/lib_openshift/src/test/integration/oc_scale.yml
+++ b/roles/lib_openshift/src/test/integration/oc_scale.yml
@@ -5,15 +5,88 @@
   gather_facts: no
   user: root
   tasks:
-  - name: list oc scale for default router dc
+#  - name: list oc scale for default router dc
+#    oc_scale:
+#      state: list
+#      name: router
+#      namespace: default
+#      kind: dc
+#    register: scaleout
+#  - debug: var=scaleout
+#
+#  - assert:
+#      that:
+#      - "'result' in scaleout"
+#      - scaleout.result > 0
+#      msg: "Did not find 'result' in returned value or result not > 0."
+#
+#  - name: get the rc for router
+#    oc_obj:
+#      state: list
+#      kind: dc
+#      namespace: default
+#      selector: router=router
+#    register: rcout
+#  - debug:
+#      msg: "{{ rcout.results.results[0]['items'][-1]['metadata']['name'] }}"
+
+  - name: scale dc to 1
     oc_scale:
+      name: router
+      namespace: default
+      kind: dc
+      replicas: 1
+    register: scaleout
+  - debug: var=scaleout
+
+  # The preferred method here would be to let the module
+  # detect when its finished and time out
+  - name: let the scale happen
+    pause:
+      seconds: 10
+    when: scaleout.changed
+
+  - name: fetch the current router pods
+    oc_obj:
+      selector: router=router
+      namespace: default
+      kind: pod
       state: list
+    register: pods
+  - debug: var=pods
+
+  - assert:
+      that:
+      - "'results' in pods and 'results' in pods.results"
+      - "{{ pods.results.results[0]['items']|length }} == 1"
+      msg: "Did not find 1 replica in scale results."
+
+  - name: scale dc to 2
+    oc_scale:
       name: router
       namespace: default
       kind: dc
+      replicas: 2
     register: scaleout
   - debug: var=scaleout
 
+  # The preferred method here would be to let the module
+  # detect when its finished and time out
+  - name: let the scale happen
+    pause:
+      seconds: 30
+
+  - name: fetch the current router pods
+    oc_obj:
+      selector: router=router
+      namespace: default
+      kind: pod
+      state: list
+    register: pods
+  - debug: var=pods
+
   - assert:
-      that: "scaleout.results.results[0]['metadata']['name'] == 'test'"
-      msg: route create failed
+      that:
+      - "'results' in pods and 'results' in pods.results"
+      - "{{ pods.results.results[0]['items']|length }} == 2"
+      msg: "Did not find 1 replica in scale results."