Diffstat (limited to 'playbooks/adhoc')
-rw-r--r--  playbooks/adhoc/uninstall.yml                                 |   3
-rw-r--r--  playbooks/adhoc/upgrades/README.md                            |  21
l---------  playbooks/adhoc/upgrades/filter_plugins                       |   1
-rwxr-xr-x  playbooks/adhoc/upgrades/library/openshift_upgrade_config.py  | 117
l---------  playbooks/adhoc/upgrades/lookup_plugins                       |   1
l---------  playbooks/adhoc/upgrades/roles                                |   1
-rw-r--r--  playbooks/adhoc/upgrades/upgrade.yml                          | 215
7 files changed, 2 insertions, 357 deletions
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index e05ab43f8..e0dbad900 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -59,8 +59,8 @@
     - atomic-openshift-master
     - atomic-openshift-node
     - atomic-openshift-sdn-ovs
-    - etcd
     - corosync
+    - etcd
     - openshift
     - openshift-master
     - openshift-node
@@ -68,6 +68,7 @@
     - openshift-sdn-ovs
     - openvswitch
     - origin
+    - origin-clients
     - origin-master
     - origin-node
     - origin-sdn-ovs
diff --git a/playbooks/adhoc/upgrades/README.md b/playbooks/adhoc/upgrades/README.md
deleted file mode 100644
index 6de8a970f..000000000
--- a/playbooks/adhoc/upgrades/README.md
+++ /dev/null
@@ -1,21 +0,0 @@
-# [NOTE]
-This playbook will re-run installation steps overwriting any local
-modifications. You should ensure that your inventory has been updated with any
-modifications you've made after your initial installation. If you find any items
-that cannot be configured via ansible please open an issue at
-https://github.com/openshift/openshift-ansible
-
-# Overview
-This playbook is available as a technical preview. It currently performs the
-following steps.
-
- * Upgrade and restart master services
- * Upgrade and restart node services
- * Applies latest configuration by re-running the installation playbook
- * Applies the latest cluster policies
- * Updates the default router if one exists
- * Updates the default registry if one exists
- * Updates image streams and quickstarts
-
-# Usage
-ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/adhoc/upgrades/upgrade.yml
diff --git a/playbooks/adhoc/upgrades/filter_plugins b/playbooks/adhoc/upgrades/filter_plugins
deleted file mode 120000
index b0b7a3414..000000000
--- a/playbooks/adhoc/upgrades/filter_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../filter_plugins/
\ No newline at end of file
diff --git a/playbooks/adhoc/upgrades/library/openshift_upgrade_config.py b/playbooks/adhoc/upgrades/library/openshift_upgrade_config.py
deleted file mode 100755
index 60f4fd8b8..000000000
--- a/playbooks/adhoc/upgrades/library/openshift_upgrade_config.py
+++ /dev/null
@@ -1,117 +0,0 @@
-#!/usr/bin/python
-# -*- coding: utf-8 -*-
-# vim: expandtab:tabstop=4:shiftwidth=4
-
-"""Ansible module for modifying OpenShift configs during an upgrade"""
-
-import os
-import shutil
-import yaml
-
-from datetime import datetime
-
-DOCUMENTATION = '''
----
-module: openshift_upgrade_config
-short_description: OpenShift Upgrade Config
-author: Jason DeTiberus
-requirements: [ ]
-'''
-EXAMPLES = '''
-'''
-
-def get_cfg_dir():
-    """Return the correct config directory to use."""
-    cfg_path = '/etc/origin/'
-    if not os.path.exists(cfg_path):
-        cfg_path = '/etc/openshift/'
-    return cfg_path
-
-
-def upgrade_master_3_0_to_3_1(backup):
-    """Main upgrade method for 3.0 to 3.1."""
-    changed = False
-
-    # Facts do not get transferred to the hosts where custom modules run,
-    # need to make some assumptions here.
-    master_config = os.path.join(get_cfg_dir(), 'master/master-config.yaml')
-
-    master_cfg_file = open(master_config, 'r')
-    config = yaml.safe_load(master_cfg_file.read())
-    master_cfg_file.close()
-
-    # Remove v1beta3 from apiLevels:
-    if 'apiLevels' in config and \
-            'v1beta3' in config['apiLevels']:
-        config['apiLevels'].remove('v1beta3')
-        changed = True
-    if 'apiLevels' in config['kubernetesMasterConfig'] and \
-            'v1beta3' in config['kubernetesMasterConfig']['apiLevels']:
-        config['kubernetesMasterConfig']['apiLevels'].remove('v1beta3')
-        changed = True
-
-    # Add the new master proxy client certs:
-    # TODO: re-enable this once these certs are generated during upgrade:
-#    if 'proxyClientInfo' not in config['kubernetesMasterConfig']:
-#        config['kubernetesMasterConfig']['proxyClientInfo'] = {
-#            'certFile': 'master.proxy-client.crt',
-#            'keyFile': 'master.proxy-client.key'
-#        }
-
-    if changed:
-        if backup:
-            timestamp = datetime.now().strftime('%Y%m%d%H%M%S')
-            basedir = os.path.split(master_config)[0]
-            backup_file = os.path.join(basedir, 'master-config.yaml.bak-%s'
-                                       % timestamp)
-            shutil.copyfile(master_config, backup_file)
-        # Write the modified config:
-        out_file = open(master_config, 'w')
-        out_file.write(yaml.safe_dump(config, default_flow_style=False))
-        out_file.close()
-
-    return changed
-
-
-def upgrade_master(from_version, to_version, backup):
-    """Upgrade entry point."""
-    if from_version == '3.0':
-        if to_version == '3.1':
-            return upgrade_master_3_0_to_3_1(backup)
-
-
-def main():
-    """ main """
-    # disabling pylint errors for global-variable-undefined and invalid-name
-    # for 'global module' usage, since it is required to use ansible_facts
-    # pylint: disable=global-variable-undefined, invalid-name
-    global module
-
-    module = AnsibleModule(
-        argument_spec=dict(
-            from_version=dict(required=True, choices=['3.0']),
-            to_version=dict(required=True, choices=['3.1']),
-            role=dict(required=True, choices=['master']),
-            backup=dict(required=False, default=True, type='bool')
-        ),
-        supports_check_mode=True,
-    )
-
-    from_version = module.params['from_version']
-    to_version = module.params['to_version']
-    role = module.params['role']
-    backup = module.params['backup']
-
-    changed = False
-    if role == 'master':
-        changed = upgrade_master(from_version, to_version, backup)
-
-    return module.exit_json(changed=changed)
-
-# ignore pylint errors related to the module_utils import
-# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import
-# import module snippets
-from ansible.module_utils.basic import *
-
-if __name__ == '__main__':
-    main()
diff --git a/playbooks/adhoc/upgrades/lookup_plugins b/playbooks/adhoc/upgrades/lookup_plugins
deleted file mode 120000
index 73cafffe5..000000000
--- a/playbooks/adhoc/upgrades/lookup_plugins
+++ /dev/null
@@ -1 +0,0 @@
-../../../lookup_plugins/
\ No newline at end of file
diff --git a/playbooks/adhoc/upgrades/roles b/playbooks/adhoc/upgrades/roles
deleted file mode 120000
index e2b799b9d..000000000
--- a/playbooks/adhoc/upgrades/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../../roles/
\ No newline at end of file
diff --git a/playbooks/adhoc/upgrades/upgrade.yml b/playbooks/adhoc/upgrades/upgrade.yml
deleted file mode 100644
index 09f991b1d..000000000
--- a/playbooks/adhoc/upgrades/upgrade.yml
+++ /dev/null
@@ -1,215 +0,0 @@
----
-- name: Update deployment type
-  hosts: OSEv3
-  roles:
-  - openshift_facts
-  post_tasks: # technically tasks are run after roles, but post_tasks is a bit more explicit.
-  - openshift_facts:
-      role: common
-      local_facts:
-        deployment_type: "{{ deployment_type }}"
-
-- name: Verify upgrade can proceed
-  hosts: masters
-  tasks:
-  # Checking the global deployment type rather than host facts, this is about
-  # what the user is requesting.
-  - fail: msg="Deployment type enterprise not supported for upgrade"
-    when: deployment_type == "enterprise"
-
-- name: Backup etcd
-  hosts: masters
-  vars:
-    embedded_etcd: "{{ openshift.master.embedded_etcd }}"
-    timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
-  roles:
-  - openshift_facts
-  tasks:
-  - stat: path=/var/lib/openshift
-    register: var_lib_openshift
-  - name: Create origin symlink if necessary
-    file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
-    when: var_lib_openshift.stat.exists == True
-  - name: Check available disk space for etcd backup
-    # We assume to be using the data dir for all backups.
-    shell: >
-      df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
-    register: avail_disk
-
-  - name: Check current embedded etcd disk usage
-    shell: >
-      du -k {{ openshift.master.etcd_data_dir }} | tail -n 1 | cut -f1
-    register: etcd_disk_usage
-    when: embedded_etcd | bool
-
-  - name: Abort if insufficient disk space for etcd backup
-    fail: msg="{{ etcd_disk_usage.stdout }} Kb disk space required for etcd backup, {{ avail_disk.stdout }} Kb available."
-    when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
-  - name: Install etcd (for etcdctl)
-    yum: pkg=etcd state=latest
-  - name: Generate etcd backup
-    command: etcdctl backup --data-dir={{ openshift.master.etcd_data_dir }} --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}
-  - name: Display location of etcd backup
-    debug: msg="Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"
-
-- name: Upgrade base package on masters
-  hosts: masters
-  roles:
-  - openshift_facts
-  vars:
-    openshift_version: "{{ openshift_pkg_version | default('') }}"
-  tasks:
-  - name: Upgrade base package
-    yum: pkg={{ openshift.common.service_type }}{{ openshift_version }} state=latest
-
-- name: Evaluate oo_first_master
-  hosts: localhost
-  vars:
-    g_masters_group: "{{ 'masters' }}"
-  tasks:
-  - name: Evaluate oo_first_master
-    add_host:
-      name: "{{ groups[g_masters_group][0] }}"
-      groups: oo_first_master
-      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
-      ansible_sudo: "{{ g_sudo | default(omit) }}"
-    when: g_masters_group in groups and (groups[g_masters_group] | length) > 0
-
-# TODO: ideally we would check the new version, without installing it. (some
-# kind of yum repoquery? would need to handle openshift -> atomic-openshift
-# package rename)
-- name: Perform upgrade version checking
-  hosts: oo_first_master
-  tasks:
-  - name: Determine new version
-    command: >
-      rpm -q --queryformat '%{version}' {{ openshift.common.service_type }}
-    register: _new_version
-
-- name: Ensure AOS 3.0.2 or Origin 1.0.6
-  hosts: oo_first_master
-  tasks:
-  - fail: This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later
-    when: _new_version.stdout | version_compare('1.0.6','<') or ( _new_version.stdout | version_compare('3.0','>=' and _new_version.stdout | version_compare('3.0.2','<') )
-
-- name: Verify upgrade can proceed
-  hosts: oo_first_master
-  tasks:
-  # Checking the global deployment type rather than host facts, this is about
-  # what the user is requesting.
-  - fail: msg="Deployment type 'enterprise' must be updated to 'openshift-enterprise' for upgrade to proceed"
-    when: deployment_type == "enterprise" and (_new_version.stdout | version_compare('1.0.7', '>=') or _new_version.stdout | version_compare('3.1', '>='))
-
-- name: Upgrade masters
-  hosts: masters
-  vars:
-    openshift_version: "{{ openshift_pkg_version | default('') }}"
-  tasks:
-  - name: Upgrade to latest available kernel
-    yum: pkg=kernel state=latest
-  - name: display just the deployment_type variable for the current host
-    debug:
-      var: hostvars[inventory_hostname]
-  - name: Upgrade master packages
-    command: yum update -y {{ openshift.common.service_type }}-master{{ openshift_version }}
-  - name: Upgrade master configuration.
-    openshift_upgrade_config: from_version=3.0 to_version=3.1 role=master
-  - name: Restart master services
-    service: name="{{ openshift.common.service_type}}-master" state=restarted
-
-- name: Upgrade nodes
-  hosts: nodes
-  vars:
-    openshift_version: "{{ openshift_pkg_version | default('') }}"
-  roles:
-  - openshift_facts
-  tasks:
-  - name: Upgrade node packages
-    command: yum update -y {{ openshift.common.service_type }}-node{{ openshift_version }}
-  - name: Restart node services
-    service: name="{{ openshift.common.service_type }}-node" state=restarted
-
-- name: Update cluster policy
-  hosts: oo_first_master
-  tasks:
-  - name: oadm policy reconcile-cluster-roles --confirm
-    command: >
-      {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
-      policy reconcile-cluster-roles --confirm
-
-- name: Update cluster policy bindings
-  hosts: oo_first_master
-  tasks:
-  - name: oadm policy reconcile-cluster-role-bindings --confirm
-    command: >
-      {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
-      policy reconcile-cluster-role-bindings
-      --exclude-groups=system:authenticated
-      --exclude-groups=system:unauthenticated
-      --exclude-users=system:anonymous
-      --additive-only=true --confirm
-    when: ( _new_version.stdout | version_compare('1.0.6', '>') and _new_version.stdout | version_compare('3.0','<') ) or _new_version.stdout | version_compare('3.0.2','>')
-
-- name: Upgrade default router
-  hosts: oo_first_master
-  vars:
-  - router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + _new_version.stdout ) }}"
-  - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
-  tasks:
-  - name: Check for default router
-    command: >
-      {{ oc_cmd }} get -n default dc/router
-    register: _default_router
-    failed_when: false
-    changed_when: false
-  - name: Check for allowHostNetwork and allowHostPorts
-    when: _default_router.rc == 0
-    shell: >
-      {{ oc_cmd }} get -o yaml scc/privileged | /usr/bin/grep -e allowHostPorts -e allowHostNetwork
-    register: _scc
-  - name: Grant allowHostNetwork and allowHostPorts
-    when:
-    - _default_router.rc == 0
-    - "'false' in _scc.stdout"
-    command: >
-      {{ oc_cmd }} patch scc/privileged -p '{"allowHostPorts":true,"allowHostNetwork":true}' --loglevel=9
-  - name: Update deployment config to 1.0.4/3.0.1 spec
-    when: _default_router.rc == 0
-    command: >
-      {{ oc_cmd }} patch dc/router -p
-      '{"spec":{"strategy":{"rollingParams":{"updatePercent":-10},"spec":{"serviceAccount":"router","serviceAccountName":"router"}}}}'
-  - name: Switch to hostNetwork=true
-    when: _default_router.rc == 0
-    command: >
-      {{ oc_cmd }} patch dc/router -p '{"spec":{"template":{"spec":{"hostNetwork":true}}}}'
-  - name: Update router image to current version
-    when: _default_router.rc == 0
-    command: >
-      {{ oc_cmd }} patch dc/router -p
-      '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}'
-
-- name: Upgrade default
-  hosts: oo_first_master
-  vars:
-  - registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + _new_version.stdout ) }}"
-  - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
-  tasks:
-  - name: Check for default registry
-    command: >
-      {{ oc_cmd }} get -n default dc/docker-registry
-    register: _default_registry
-    failed_when: false
-    changed_when: false
-  - name: Update registry image to current version
-    when: _default_registry.rc == 0
-    command: >
-      {{ oc_cmd }} patch dc/docker-registry -p
-      '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
-
-- name: Update image streams and templates
-  hosts: oo_first_master
-  vars:
-    openshift_examples_import_command: "update"
-    openshift_deployment_type: "{{ deployment_type }}"
-  roles:
-  - openshift_examples