---
- name: Load master facts
  hosts: masters
  roles:
  - openshift_facts

- name: Verify upgrade can proceed
  hosts: masters[0]
  gather_facts: no
  tasks:
    # Check the global deployment type rather than host facts; this is about
    # what the user is requesting.
    - fail: msg="Deployment type enterprise not supported for upgrade"
      when: deployment_type == "enterprise"

- name: Run pre-upgrade checks on first master
  hosts: masters[0]
  tasks:
  # If this script errors out, ansible will show its stdout/stderr, which
  # contains details for the user:
  - script: files/pre-upgrade-check
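  # A non-zero exit status from the script fails this task and aborts the
  # upgrade before any changes have been made.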

- name: Evaluate etcd_hosts
  hosts: localhost
  tasks:
  - name: Evaluate etcd hosts (embedded etcd)
    add_host:
      name: "{{ groups.masters.0 }}"
      groups: etcd_hosts
    when: hostvars[groups.masters.0].openshift.master.embedded_etcd | bool
  - name: Evaluate etcd hosts (external etcd)
    add_host:
      name: "{{ item }}"
      groups: etcd_hosts
    with_items: "{{ groups.etcd }}"
    when: not hostvars[groups.masters.0].openshift.master.embedded_etcd | bool
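  # With embedded etcd, etcd_hosts ends up containing only the first master;
  # with an external [etcd] inventory group it contains exactly those hosts.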

- name: Backup etcd
  hosts: etcd_hosts
  vars:
    embedded_etcd: "{{ openshift.master.embedded_etcd }}"
    timestamp: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
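    # Note: the pipe lookup runs on the control host and, as a lazy var, is
    # re-evaluated on every reference, so the backup task and the debug message
    # below could in principle render different timestamps across a second
    # boundary; a set_fact would pin one value if that ever matters.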
  roles:
  - openshift_facts
  tasks:

  - stat: path=/var/lib/openshift
    register: var_lib_openshift

  - name: Create origin symlink if necessary
    file: src=/var/lib/openshift/ dest=/var/lib/origin state=link
    when: var_lib_openshift.stat.exists
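  # Origin's data dir moved from /var/lib/openshift to /var/lib/origin; the
  # symlink lets the backup tasks below use the new path on older installs.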

  - name: Check available disk space for etcd backup
    # We assume the backup is written to the same filesystem as the data dir.
    shell: >
      df --output=avail -k {{ openshift.common.data_dir }} | tail -n 1
    register: avail_disk

  - name: Check current embedded etcd disk usage
    shell: >
      du -k {{ openshift.master.etcd_data_dir }} | tail -n 1 | cut -f1
    register: etcd_disk_usage
    when: embedded_etcd | bool

  - name: Abort if insufficient disk space for etcd backup
    fail:
      msg: >
        {{ etcd_disk_usage.stdout }} KiB disk space required for etcd backup,
        {{ avail_disk.stdout }} KiB available.
    when: (embedded_etcd | bool) and (etcd_disk_usage.stdout|int > avail_disk.stdout|int)
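  # Worked example (hypothetical numbers): if du reports 2097152 KiB used and
  # df reports 1048576 KiB available, 2097152 > 1048576 and the play fails
  # here before the backup is attempted.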

  - name: Install etcd (for etcdctl)
    yum:
      pkg: etcd
      state: latest

  - name: Generate etcd backup
    command: >
      etcdctl backup --data-dir={{ openshift.master.etcd_data_dir }}
      --backup-dir={{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}

  - name: Display location of etcd backup
    debug:
      msg: "Etcd backup created in {{ openshift.common.data_dir }}/etcd-backup-{{ timestamp }}"

- name: Update deployment type
  hosts: OSEv3
  roles:
  - openshift_facts
  post_tasks:
  - openshift_facts:
      role: common
      local_facts:
        deployment_type: "{{ deployment_type }}"


- name: Perform upgrade version checking
  hosts: masters[0]
  tasks:
  - name: Clean yum cache
    command: yum clean all

  - name: Determine available versions
    script: files/versions.sh {{ openshift.common.service_type }} openshift
    register: g_versions_result

  - set_fact:
      g_aos_versions: "{{ g_versions_result.stdout | from_yaml }}"

  - set_fact:
      g_new_version: "{{ g_aos_versions.curr_version.split('-', 1).0 if g_aos_versions.avail_version is none else g_aos_versions.avail_version.split('-', 1).0 }}"
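  # versions.sh is expected to print YAML carrying curr_version/avail_version,
  # e.g. (hypothetical values):
  #   curr_version: 3.0.2.0-0.git.20.656dc3e.el7ose
  #   avail_version: 3.1.0.4-1.git.15.5e061c3.el7aos
  # g_new_version keeps only the part before the first '-', preferring
  # avail_version when one exists.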

  - fail: msg="This playbook requires Origin 1.0.6 or later"
    when: deployment_type == 'origin' and g_aos_versions.curr_version | version_compare('1.0.6','<')

  - fail: msg="This playbook requires Atomic OpenShift 3.0.2 or later"
    when: deployment_type in ['openshift-enterprise', 'atomic-enterprise'] and g_aos_versions.curr_version | version_compare('3.0.2','<')


- name: Upgrade masters
  hosts: masters
  vars:
    openshift_version: "{{ openshift_pkg_version | default('') }}"
  tasks:
    - name: Upgrade to latest available kernel
      yum:
        pkg: kernel
        state: latest

    - name: Upgrade master packages
      command: yum update -y {{ openshift.common.service_type }}-master{{ openshift_version }}
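    # yum is invoked via command so an exact version can be appended to the
    # package name; openshift_pkg_version, if set, is assumed to include its
    # leading '-' (hypothetical example: '-3.1.0.4').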

    - name: Ensure python-yaml present for config upgrade
      yum:
        pkg: python-yaml
        state: installed

    - name: Upgrade master configuration
      openshift_upgrade_config:
        from_version: '3.0'
        to_version: '3.1'
        role: master
        config_base: "{{ openshift.common.config_base }}"

    - set_fact:
        master_certs_missing: True
        master_cert_subdir: master-{{ openshift.common.hostname }}
        master_cert_config_dir: "{{ openshift.common.config_base }}/master"
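    # These facts are consumed by the certificate generation and sync plays
    # that follow.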

- name: Create temp directory for syncing certs
  hosts: localhost
  gather_facts: no
  tasks:
  - name: Create local temp directory for syncing certs
    command: mktemp -d /tmp/openshift-ansible-XXXXXXX
    register: g_master_mktemp
    changed_when: False

- name: Generate missing master certificates
  hosts: masters[0]
  vars:
    master_hostnames: "{{ hostvars
                          | oo_select_keys(groups.masters)
                          | oo_collect('openshift.common.all_hostnames')
                          | oo_flatten | unique }}"
    master_generated_certs_dir: "{{ openshift.common.config_base }}/generated-configs"
    masters_needing_certs: "{{ hostvars
                               | oo_select_keys(groups.masters
                                                | difference([groups.masters.0])) }}"
    sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
    openshift_deployment_type: "{{ deployment_type }}"
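  # Cert flow: the role generates configs for masters_needing_certs on this
  # host, the post_tasks tar them up and fetch the tarballs into sync_tmpdir
  # on the control host, and the next play unarchives them on each master.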
  roles:
  - openshift_master_certificates
  post_tasks:
  - name: Remove generated etcd client certs when using external etcd
    file:
      path: "{{ master_generated_certs_dir }}/{{ item.0.master_cert_subdir }}/{{ item.1 }}"
      state: absent
    when: groups.etcd is defined and groups.etcd
    with_nested:
    - "{{ masters_needing_certs }}"
    - - master.etcd-client.crt
      - master.etcd-client.key

  - name: Create a tarball of the master certs
    command: >
      tar -czvf {{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz
        -C {{ master_generated_certs_dir }}/{{ item.master_cert_subdir }} .
    args:
      creates: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz"
    with_items: "{{ masters_needing_certs }}"

  - name: Retrieve the master cert tarball from the master
    fetch:
      src: "{{ master_generated_certs_dir }}/{{ item.master_cert_subdir }}.tgz"
      dest: "{{ sync_tmpdir }}/"
      flat: yes
      fail_on_missing: yes
      validate_checksum: yes
    with_items: "{{ masters_needing_certs }}"


- name: Sync certs and restart masters post configuration change
  hosts: masters
  vars:
    sync_tmpdir: "{{ hostvars.localhost.g_master_mktemp.stdout }}"
  tasks:
  - name: Unarchive the tarball on the master
    unarchive:
      src: "{{ sync_tmpdir }}/{{ master_cert_subdir }}.tgz"
      dest: "{{ master_cert_config_dir }}"
    when: inventory_hostname != groups.masters.0

  - name: Restart master services
    service: name="{{ openshift.common.service_type}}-master" state=restarted


- name: Delete temporary directory on localhost
  hosts: localhost
  gather_facts: no
  tasks:
  - file: path={{ g_master_mktemp.stdout }} state=absent
    changed_when: False


- name: Upgrade nodes
  hosts: nodes
  vars:
    openshift_version: "{{ openshift_pkg_version | default('') }}"
  roles:
  - openshift_facts
  tasks:
    - name: Upgrade node packages
      command: yum update -y {{ openshift.common.service_type }}-node{{ openshift_version }}
    - name: Restart node services
      service: name="{{ openshift.common.service_type }}-node" state=restarted

- name: Update cluster policy and policy bindings
  hosts: masters[0]
  vars:
    origin_reconcile_bindings: "{{ deployment_type == 'origin' and g_new_version | version_compare('1.0.6', '>') }}"
    ent_reconcile_bindings: "{{ deployment_type in ['openshift-enterprise', 'atomic-enterprise'] and g_new_version | version_compare('3.0.2','>') }}"
  tasks:
    - name: oadm policy reconcile-cluster-roles --confirm
      command: >
        {{ openshift.common.admin_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
        policy reconcile-cluster-roles --confirm

    - name: oadm policy reconcile-cluster-role-bindings --confirm
      command: >
        {{ openshift.common.admin_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
        policy reconcile-cluster-role-bindings
        --exclude-groups=system:authenticated
        --exclude-groups=system:unauthenticated
        --exclude-users=system:anonymous
        --additive-only=true --confirm
      when: origin_reconcile_bindings | bool or ent_reconcile_bindings | bool
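    # --additive-only means missing subjects are added but extra ones are kept;
    # the authenticated/unauthenticated groups and the anonymous user are
    # excluded, presumably so access an admin deliberately removed is not
    # re-granted.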


- name: Restart masters post reconcile
  hosts: masters
  tasks:
    - name: Restart master services
      service: name="{{ openshift.common.service_type}}-master" state=restarted


- name: Upgrade default router and registry
  hosts: masters[0]
  vars:
    registry_image: "{{ openshift.master.registry_url | replace('${component}', 'docker-registry') | replace('${version}', 'v' + g_new_version) }}"
    router_image: "{{ openshift.master.registry_url | replace('${component}', 'haproxy-router') | replace('${version}', 'v' + g_new_version) }}"
    oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
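  # Hypothetical resolution: with registry_url
  # 'registry.example.com/openshift3/ose-${component}:${version}' and
  # g_new_version '3.1.0.4', router_image renders as
  # 'registry.example.com/openshift3/ose-haproxy-router:v3.1.0.4'.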
  tasks:
    - name: Check for default router
      command: >
        {{ oc_cmd }} get -n default dc/router
      register: _default_router
      failed_when: false
      changed_when: false
    - name: Check for allowHostNetwork and allowHostPorts
      when: _default_router.rc == 0
      shell: >
        {{ oc_cmd }} get -o yaml scc/privileged | /usr/bin/grep -e allowHostPorts -e allowHostNetwork
      register: _scc
    - name: Grant allowHostNetwork and allowHostPorts
      when:
        - _default_router.rc == 0
        - "'false' in _scc.stdout"
      command: >
        {{ oc_cmd }} patch scc/privileged -p '{"allowHostPorts":true,"allowHostNetwork":true}' --loglevel=9
    - name: Update deployment config to 1.0.4/3.0.1 spec
      when: _default_router.rc == 0
      command: >
        {{ oc_cmd }} patch dc/router -p
        '{"spec":{"strategy":{"rollingParams":{"updatePercent":-10},"spec":{"serviceAccount":"router","serviceAccountName":"router"}}}}'
    - name: Switch to hostNetwork=true
      when: _default_router.rc == 0
      command: >
        {{ oc_cmd }} patch dc/router -p '{"spec":{"template":{"spec":{"hostNetwork":true}}}}'
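    # With hostNetwork=true the router binds ports 80/443 directly on the
    # host, which is why allowHostNetwork/allowHostPorts were granted on the
    # privileged SCC above.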
    - name: Update router image to current version
      when: _default_router.rc == 0
      command: >
        {{ oc_cmd }} patch dc/router -p
        '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}'

    - name: Check for default registry
      command: >
          {{ oc_cmd }} get -n default dc/docker-registry
      register: _default_registry
      failed_when: false
      changed_when: false
    - name: Update registry image to current version
      when: _default_registry.rc == 0
      command: >
        {{ oc_cmd }} patch dc/docker-registry -p
        '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'

- name: Update image streams and templates
  hosts: masters[0]
  vars:
    openshift_examples_import_command: "update"
    openshift_deployment_type: "{{ deployment_type }}"
  roles:
    - openshift_examples