Diffstat (limited to 'playbooks'): 36 files changed, 534 insertions, 225 deletions
| diff --git a/playbooks/adhoc/contiv/delete_contiv.yml b/playbooks/adhoc/contiv/delete_contiv.yml index 91948c72e..eec6c23a7 100644 --- a/playbooks/adhoc/contiv/delete_contiv.yml +++ b/playbooks/adhoc/contiv/delete_contiv.yml @@ -1,5 +1,5 @@  --- -- name: delete contiv +- name: Uninstall contiv    hosts: all    gather_facts: False    tasks: diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml index 97d835eae..ddd2ecebd 100644 --- a/playbooks/adhoc/uninstall.yml +++ b/playbooks/adhoc/uninstall.yml @@ -103,7 +103,7 @@          - atomic-openshift-sdn-ovs          - cockpit-bridge          - cockpit-docker -        - cockpit-shell +        - cockpit-system          - cockpit-ws          - kubernetes-client          - openshift @@ -317,6 +317,7 @@    - name: restart NetworkManager      service: name=NetworkManager state=restarted +    when: openshift_use_dnsmasq | default(true) | bool  - hosts: masters    become: yes @@ -346,7 +347,7 @@      - atomic-openshift-master      - cockpit-bridge      - cockpit-docker -    - cockpit-shell +    - cockpit-system      - cockpit-ws      - corosync      - kubernetes-client diff --git a/playbooks/byo/openshift-cfme/config.yml b/playbooks/byo/openshift-cfme/config.yml new file mode 100644 index 000000000..0e8e7a94d --- /dev/null +++ b/playbooks/byo/openshift-cfme/config.yml @@ -0,0 +1,8 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml +  tags: +    - always + +- include: ../../common/openshift-cluster/evaluate_groups.yml + +- include: ../../common/openshift-cfme/config.yml diff --git a/playbooks/byo/openshift-cfme/uninstall.yml b/playbooks/byo/openshift-cfme/uninstall.yml new file mode 100644 index 000000000..c8ed16859 --- /dev/null +++ b/playbooks/byo/openshift-cfme/uninstall.yml @@ -0,0 +1,6 @@ +--- +# - include: ../openshift-cluster/initialize_groups.yml +#   tags: +#     - always + +- include: ../../common/openshift-cfme/uninstall.yml diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml index fd4a9eb26..acf5469bf 100644 --- a/playbooks/byo/openshift-cluster/config.yml +++ b/playbooks/byo/openshift-cluster/config.yml @@ -3,19 +3,6 @@    tags:    - always -- name: Verify Requirements -  hosts: OSEv3 -  roles: -  - openshift_health_checker -  vars: -  - r_openshift_health_checker_playbook_context: "install" -  post_tasks: -  - action: openshift_health_check -    args: -      checks: -      - disk_availability -      - memory_availability -  - include: ../../common/openshift-cluster/std_include.yml    tags:    - always diff --git a/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml b/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml new file mode 100644 index 000000000..29f821eda --- /dev/null +++ b/playbooks/byo/openshift-cluster/redeploy-etcd-ca.yml @@ -0,0 +1,10 @@ +--- +- include: initialize_groups.yml +  tags: +  - always + +- include: ../../common/openshift-cluster/std_include.yml +  tags: +  - always + +- include: ../../common/openshift-cluster/redeploy-certificates/etcd-ca.yml diff --git a/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml b/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml index 3b33e0d6f..6e11a111b 100644 --- a/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml +++ b/playbooks/byo/openshift-cluster/redeploy-openshift-ca.yml @@ -7,4 +7,4 @@    tags:    - always -- include: ../../common/openshift-cluster/redeploy-certificates/ca.yml +- include: ../../common/openshift-cluster/redeploy-certificates/openshift-ca.yml diff --git 
a/playbooks/byo/openshift-cluster/service-catalog.yml b/playbooks/byo/openshift-cluster/service-catalog.yml new file mode 100644 index 000000000..a9fc18958 --- /dev/null +++ b/playbooks/byo/openshift-cluster/service-catalog.yml @@ -0,0 +1,12 @@ +--- +# +# This playbook is a preview of upcoming changes for installing +# Hosted logging on.  See inventory/byo/hosts.*.example for the +# currently supported method. +# +- include: initialize_groups.yml + +- include: ../../common/openshift-cluster/service_catalog.yml +  vars: +    openshift_cluster_id: "{{ cluster_id | default('default') }}" +    openshift_debug_level: "{{ debug_level | default(2) }}" diff --git a/playbooks/byo/openshift-etcd/migrate.yml b/playbooks/byo/openshift-etcd/migrate.yml new file mode 100644 index 000000000..fd02e066e --- /dev/null +++ b/playbooks/byo/openshift-etcd/migrate.yml @@ -0,0 +1,124 @@ +--- +- include: ../openshift-cluster/initialize_groups.yml +  tags: +  - always + +- include: ../../common/openshift-cluster/evaluate_groups.yml +  tags: +  - always + +- name: Run pre-checks +  hosts: oo_etcd_to_config +  tags: +  - always +  roles: +  - role: etcd_migrate +    r_etcd_migrate_action: check +    etcd_peer: "{{ ansible_default_ipv4.address }}" + +# TODO(jchaloup): replace the std_include with something minimal so the entire playbook is faster +# e.g. I don't need to detect the OCP version, install deps, etc. +- include: ../../common/openshift-cluster/std_include.yml +  tags: +  - always + +- name: Backup v2 data +  hosts: oo_etcd_to_config +  gather_facts: no +  tags: +  - always +  roles: +  - role: openshift_facts +  - role: etcd_common +    r_etcd_common_action: backup +    r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" +    r_etcd_common_backup_tag: pre-migration +    r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" +    r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}" + +- name: Gate on etcd backup +  hosts: localhost +  connection: local +  become: no +  tasks: +  - set_fact: +      etcd_backup_completed: "{{ hostvars +                                 | oo_select_keys(groups.oo_etcd_to_config) +                                 | oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}" +  - set_fact: +      etcd_backup_failed: "{{ groups.oo_etcd_to_config | difference(etcd_backup_completed) }}" +  - fail: +      msg: "Migration cannot continue. 
The following hosts did not complete etcd backup: {{ etcd_backup_failed | join(',') }}" +    when: +    - etcd_backup_failed | length > 0 + +- name: Prepare masters for etcd data migration +  hosts: oo_masters_to_config +  tasks: +  - set_fact: +      master_services: +      - "{{ openshift.common.service_type + '-master' }}" +  - set_fact: +      master_services: +      - "{{ openshift.common.service_type + '-master-controllers' }}" +      - "{{ openshift.common.service_type + '-master-api' }}" +    when: +    - (openshift_master_cluster_method is defined and openshift_master_cluster_method == "native") or openshift.common.is_master_system_container | bool +  - debug: +      msg: "master service name: {{ master_services }}" +  - name: Stop masters +    service: +      name: "{{ item }}" +      state: stopped +    with_items: "{{ master_services }}" + +- name: Migrate etcd data from v2 to v3 +  hosts: oo_etcd_to_config +  gather_facts: no +  tags: +  - always +  roles: +  - role: etcd_migrate +    r_etcd_migrate_action: migrate +    etcd_peer: "{{ ansible_default_ipv4.address }}" + +- name: Gate on etcd migration +  hosts: oo_masters_to_config +  gather_facts: no +  tasks: +  - set_fact: +      etcd_migration_completed: "{{ hostvars +                                 | oo_select_keys(groups.oo_etcd_to_config) +                                 | oo_collect('inventory_hostname', {'r_etcd_migrate_success': true}) }}" +  - set_fact: +      etcd_migration_failed: "{{ groups.oo_etcd_to_config | difference(etcd_migration_completed) }}" + +- name: Configure masters if etcd data migration is succesfull +  hosts: oo_masters_to_config +  roles: +  - role: etcd_migrate +    r_etcd_migrate_action: configure +    when: etcd_migration_failed | length == 0 +  tasks: +  - debug: +      msg: "Skipping master re-configuration since migration failed." +    when: +    - etcd_migration_failed | length > 0 + +- name: Start masters after etcd data migration +  hosts: oo_masters_to_config +  tasks: +  - name: Start master services +    service: +      name: "{{ item }}" +      state: started +    register: service_status +    # Sometimes the master-api, resp. master-controllers fails to start for the first time +    until: service_status.state is defined and service_status.state == "started" +    retries: 5 +    delay: 10 +    with_items: "{{ master_services[::-1] }}" +  - fail: +      msg: "Migration failed. The following hosts were not properly migrated: {{ etcd_migration_failed | join(',') }}" +    when: +    - etcd_migration_failed | length > 0 diff --git a/playbooks/common/openshift-cfme/config.yml b/playbooks/common/openshift-cfme/config.yml new file mode 100644 index 000000000..533a35d9e --- /dev/null +++ b/playbooks/common/openshift-cfme/config.yml @@ -0,0 +1,44 @@ +--- +# TODO: Make this work. The 'name' variable below is undefined +# presently because it's part of the cfme role. This play can't run +# until that's re-worked. +# +# - name: Pre-Pull manageiq-pods docker images +#   hosts: nodes +#   tasks: +#   - name: Ensure the latest manageiq-pods docker image is pulling +#     docker_image: +#       name: "{{ openshift_cfme_container_image }}" +#     # Fire-and-forget method, never timeout +#     async: 99999999999 +#     # F-a-f, never check on this. True 'background' task. 
+#     poll: 0 + +- name: Configure Masters for CFME Bulk Image Imports +  hosts: oo_masters_to_config +  serial: 1 +  tasks: +  - name: Run master cfme tuning playbook +    include_role: +      name: openshift_cfme +      tasks_from: tune_masters + +- name: Setup CFME +  hosts: oo_first_master +  vars: +    r_openshift_cfme_miq_template_content: "{{ lookup('file', 'roles/openshift_cfme/files/miq-template.yaml') | from_yaml}}" +  pre_tasks: +  - name: Create a temporary place to evaluate the PV templates +    command: mktemp -d /tmp/openshift-ansible-XXXXXXX +    register: r_openshift_cfme_mktemp +    changed_when: false +  - name: Ensure the server template was read from disk +    debug: +      msg="{{ r_openshift_cfme_miq_template_content | from_yaml }}" + +  tasks: +  - name: Run the CFME Setup Role +    include_role: +      name: openshift_cfme +    vars: +      template_dir: "{{ hostvars[groups.masters.0].r_openshift_cfme_mktemp.stdout }}" diff --git a/playbooks/common/openshift-cfme/filter_plugins b/playbooks/common/openshift-cfme/filter_plugins new file mode 120000 index 000000000..99a95e4ca --- /dev/null +++ b/playbooks/common/openshift-cfme/filter_plugins @@ -0,0 +1 @@ +../../../filter_plugins
\ No newline at end of file diff --git a/playbooks/common/openshift-cfme/library b/playbooks/common/openshift-cfme/library new file mode 120000 index 000000000..ba40d2f56 --- /dev/null +++ b/playbooks/common/openshift-cfme/library @@ -0,0 +1 @@ +../../../library
\ No newline at end of file diff --git a/playbooks/common/openshift-cfme/roles b/playbooks/common/openshift-cfme/roles new file mode 120000 index 000000000..20c4c58cf --- /dev/null +++ b/playbooks/common/openshift-cfme/roles @@ -0,0 +1 @@ +../../../roles
\ No newline at end of file diff --git a/playbooks/common/openshift-cfme/uninstall.yml b/playbooks/common/openshift-cfme/uninstall.yml new file mode 100644 index 000000000..78b8e7668 --- /dev/null +++ b/playbooks/common/openshift-cfme/uninstall.yml @@ -0,0 +1,8 @@ +--- +- name: Uninstall CFME +  hosts: masters +  tasks: +  - name: Run the CFME Uninstall Role Tasks +    include_role: +      name: openshift_cfme +      tasks_from: uninstall diff --git a/playbooks/common/openshift-checks/health.yml b/playbooks/common/openshift-checks/health.yml index 1bee460e8..c7766ff04 100644 --- a/playbooks/common/openshift-checks/health.yml +++ b/playbooks/common/openshift-checks/health.yml @@ -1,4 +1,9 @@  --- +# openshift_health_checker depends on openshift_version which now requires group eval. +- include: ../openshift-cluster/evaluate_groups.yml +  tags: +  - always +  - name: Run OpenShift health checks    hosts: OSEv3    roles: diff --git a/playbooks/common/openshift-checks/pre-install.yml b/playbooks/common/openshift-checks/pre-install.yml index e01c6f38d..7ca9f7e8b 100644 --- a/playbooks/common/openshift-checks/pre-install.yml +++ b/playbooks/common/openshift-checks/pre-install.yml @@ -1,4 +1,9 @@  --- +# openshift_health_checker depends on openshift_version which now requires group eval. +- include: ../openshift-cluster/evaluate_groups.yml +  tags: +  - always +  - hosts: OSEv3    name: run OpenShift pre-install checks    roles: diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml index 1482b3a3f..7224ae712 100644 --- a/playbooks/common/openshift-cluster/config.yml +++ b/playbooks/common/openshift-cluster/config.yml @@ -1,4 +1,23 @@  --- +# TODO: refactor this into its own include +# and pass a variable for ctx +- name: Verify Requirements +  hosts: oo_all_hosts +  roles: +  - openshift_health_checker +  vars: +  - r_openshift_health_checker_playbook_context: "install" +  post_tasks: +  - action: openshift_health_check +    args: +      checks: +      - disk_availability +      - memory_availability +      - package_availability +      - package_version +      - docker_image_availability +      - docker_storage +  - include: initialize_oo_option_facts.yml    tags:    - always @@ -45,6 +64,12 @@    tags:    - hosted +- include: service_catalog.yml +  when: +  - openshift_enable_service_catalog | default(false) | bool +  tags: +  - servicecatalog +  - name: Re-enable excluder if it was previously enabled    hosts: oo_masters_to_config:oo_nodes_to_config    tags: diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml index 46932b27f..c28ce4c14 100644 --- a/playbooks/common/openshift-cluster/evaluate_groups.yml +++ b/playbooks/common/openshift-cluster/evaluate_groups.yml @@ -155,5 +155,5 @@        groups: oo_glusterfs_to_config        ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"        ansible_become: "{{ g_sudo | default(omit) }}" -    with_items: "{{ g_glusterfs_hosts | union(g_glusterfs_registry_hosts) | default([]) }}" +    with_items: "{{ g_glusterfs_hosts | union(g_glusterfs_registry_hosts | default([])) }}"      changed_when: no diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml new file mode 100644 index 000000000..6964e8567 --- /dev/null +++ b/playbooks/common/openshift-cluster/redeploy-certificates/etcd-ca.yml @@ -0,0 +1,158 @@ +--- +- name: Check cert expirys 
+  hosts: oo_etcd_to_config:oo_masters_to_config +  vars: +    openshift_certificate_expiry_show_all: yes +  roles: +  # Sets 'check_results' per host which contains health status for +  # etcd, master and node certificates.  We will use 'check_results' +  # to determine if any certificates were expired prior to running +  # this playbook. Service restarts will be skipped if any +  # certificates were previously expired. +  - role: openshift_certificate_expiry + +- name: Backup existing etcd CA certificate directories +  hosts: oo_etcd_to_config +  roles: +  - role: etcd_common +    r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" +  tasks: +  - name: Determine if CA certificate directory exists +    stat: +      path: "{{ etcd_ca_dir }}" +    register: etcd_ca_certs_dir_stat +  - name: Backup generated etcd certificates +    command: > +      tar -czf {{ etcd_conf_dir }}/etcd-ca-certificate-backup-{{ ansible_date_time.epoch }}.tgz +      {{ etcd_ca_dir }} +    args: +      warn: no +    when: etcd_ca_certs_dir_stat.stat.exists | bool +  - name: Remove CA certificate directory +    file: +      path: "{{ etcd_ca_dir }}" +      state: absent +    when: etcd_ca_certs_dir_stat.stat.exists | bool + +- name: Generate new etcd CA +  hosts: oo_first_etcd +  roles: +  - role: openshift_etcd_ca +    etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" +    etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" +    etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" + +- name: Create temp directory for syncing certs +  hosts: localhost +  connection: local +  become: no +  gather_facts: no +  tasks: +  - name: Create local temp directory for syncing certs +    local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX +    register: g_etcd_mktemp +    changed_when: false + +- name: Distribute etcd CA to etcd hosts +  hosts: oo_etcd_to_config +  vars: +    etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" +  roles: +  - role: etcd_common +    r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" +  tasks: +  - name: Create a tarball of the etcd ca certs +    command: > +      tar -czvf {{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz +        -C {{ etcd_ca_dir }} . 
+    args: +      creates: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz" +      warn: no +    delegate_to: "{{ etcd_ca_host }}" +    run_once: true +  - name: Retrieve etcd ca cert tarball +    fetch: +      src: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz" +      dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/" +      flat: yes +      fail_on_missing: yes +      validate_checksum: yes +    delegate_to: "{{ etcd_ca_host }}" +    run_once: true +  - name: Ensure ca directory exists +    file: +      path: "{{ etcd_ca_dir }}" +      state: directory +  - name: Unarchive etcd ca cert tarballs +    unarchive: +      src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/{{ etcd_ca_name }}.tgz" +      dest: "{{ etcd_ca_dir }}" +  - name: Read current etcd CA +    slurp: +      src: "{{ etcd_conf_dir }}/ca.crt" +    register: g_current_etcd_ca_output +  - name: Read new etcd CA +    slurp: +      src: "{{ etcd_ca_dir }}/ca.crt" +    register: g_new_etcd_ca_output +  - copy: +      content: "{{ (g_new_etcd_ca_output.content|b64decode) + (g_current_etcd_ca_output.content|b64decode) }}" +      dest: "{{ item }}/ca.crt" +    with_items: +    - "{{ etcd_conf_dir }}" +    - "{{ etcd_ca_dir }}" + +- include: ../../openshift-etcd/restart.yml +  # Do not restart etcd when etcd certificates were previously expired. +  when: ('expired' not in (hostvars +                           | oo_select_keys(groups['etcd']) +                           | oo_collect('check_results.check_results.etcd') +                           | oo_collect('health'))) + +- name: Retrieve etcd CA certificate +  hosts: oo_first_etcd +  roles: +  - role: etcd_common +    r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" +  tasks: +  - name: Retrieve etcd CA certificate +    fetch: +      src: "{{ etcd_conf_dir }}/ca.crt" +      dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/" +      flat: yes +      fail_on_missing: yes +      validate_checksum: yes + +- name: Distribute etcd CA to masters +  hosts: oo_masters_to_config +  vars: +    openshift_ca_host: "{{ groups.oo_first_master.0 }}" +  tasks: +  - name: Deploy etcd CA +    copy: +      src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/ca.crt" +      dest: "{{ openshift.common.config_base }}/master/master.etcd-ca.crt" +    when: groups.oo_etcd_to_config | default([]) | length > 0 + +- name: Delete temporary directory on localhost +  hosts: localhost +  connection: local +  become: no +  gather_facts: no +  tasks: +  - file: +      name: "{{ g_etcd_mktemp.stdout }}" +      state: absent +    changed_when: false + +- include: ../../openshift-master/restart.yml +  # Do not restart masters when master certificates were previously expired. 
+  when: ('expired' not in hostvars +                       | oo_select_keys(groups['oo_masters_to_config']) +                       | oo_collect('check_results.check_results.ocp_certs') +                       | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"})) +        and +        ('expired' not in hostvars +                          | oo_select_keys(groups['oo_masters_to_config']) +                          | oo_collect('check_results.check_results.ocp_certs') +                          | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"})) diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml index 0d94a011a..089ae6bbc 100644 --- a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml +++ b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml @@ -7,7 +7,7 @@      when: not openshift.common.version_gte_3_2_or_1_2 | bool  - name: Check cert expirys -  hosts: oo_nodes_to_config:oo_etcd_to_config:oo_masters_to_config +  hosts: oo_nodes_to_config:oo_masters_to_config    vars:      openshift_certificate_expiry_show_all: yes    roles: @@ -18,140 +18,6 @@    # certificates were previously expired.    - role: openshift_certificate_expiry -- name: Backup existing etcd CA certificate directories -  hosts: oo_etcd_to_config -  roles: -  - role: etcd_common -    r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" -  tasks: -  - name: Determine if CA certificate directory exists -    stat: -      path: "{{ etcd_ca_dir }}" -    register: etcd_ca_certs_dir_stat -  - name: Backup generated etcd certificates -    command: > -      tar -czf {{ etcd_conf_dir }}/etcd-ca-certificate-backup-{{ ansible_date_time.epoch }}.tgz -      {{ etcd_ca_dir }} -    args: -      warn: no -    when: etcd_ca_certs_dir_stat.stat.exists | bool -  - name: Remove CA certificate directory -    file: -      path: "{{ etcd_ca_dir }}" -      state: absent -    when: etcd_ca_certs_dir_stat.stat.exists | bool - -- name: Generate new etcd CA -  hosts: oo_first_etcd -  roles: -  - role: openshift_etcd_ca -    etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}" -    etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" -    etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}" - -- name: Create temp directory for syncing certs -  hosts: localhost -  connection: local -  become: no -  gather_facts: no -  tasks: -  - name: Create local temp directory for syncing certs -    local_action: command mktemp -d /tmp/openshift-ansible-XXXXXXX -    register: g_etcd_mktemp -    changed_when: false - -- name: Distribute etcd CA to etcd hosts -  hosts: oo_etcd_to_config -  vars: -    etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" -  roles: -  - role: etcd_common -    r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" -  tasks: -  - name: Create a tarball of the etcd ca certs -    command: > -      tar -czvf {{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz -        -C {{ etcd_ca_dir }} . 
-    args: -      creates: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz" -      warn: no -    delegate_to: "{{ etcd_ca_host }}" -    run_once: true -  - name: Retrieve etcd ca cert tarball -    fetch: -      src: "{{ etcd_conf_dir }}/{{ etcd_ca_name }}.tgz" -      dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/" -      flat: yes -      fail_on_missing: yes -      validate_checksum: yes -    delegate_to: "{{ etcd_ca_host }}" -    run_once: true -  - name: Ensure ca directory exists -    file: -      path: "{{ etcd_ca_dir }}" -      state: directory -  - name: Unarchive etcd ca cert tarballs -    unarchive: -      src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/{{ etcd_ca_name }}.tgz" -      dest: "{{ etcd_ca_dir }}" -  - name: Read current etcd CA -    slurp: -      src: "{{ etcd_conf_dir }}/ca.crt" -    register: g_current_etcd_ca_output -  - name: Read new etcd CA -    slurp: -      src: "{{ etcd_ca_dir }}/ca.crt" -    register: g_new_etcd_ca_output -  - copy: -      content: "{{ (g_new_etcd_ca_output.content|b64decode) + (g_current_etcd_ca_output.content|b64decode) }}" -      dest: "{{ item }}/ca.crt" -    with_items: -    - "{{ etcd_conf_dir }}" -    - "{{ etcd_ca_dir }}" - -- name: Retrieve etcd CA certificate -  hosts: oo_first_etcd -  roles: -  - role: etcd_common -    r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" -  tasks: -  - name: Retrieve etcd CA certificate -    fetch: -      src: "{{ etcd_conf_dir }}/ca.crt" -      dest: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/" -      flat: yes -      fail_on_missing: yes -      validate_checksum: yes - -- name: Distribute etcd CA to masters -  hosts: oo_masters_to_config -  vars: -    openshift_ca_host: "{{ groups.oo_first_master.0 }}" -  tasks: -  - name: Deploy CA certificate, key, bundle and serial -    copy: -      src: "{{ hostvars['localhost'].g_etcd_mktemp.stdout }}/ca.crt" -      dest: "{{ openshift.common.config_base }}/master/master.etcd-ca.crt" -    when: groups.oo_etcd_to_config | default([]) | length > 0 - -- name: Delete temporary directory on localhost -  hosts: localhost -  connection: local -  become: no -  gather_facts: no -  tasks: -  - file: -      name: "{{ g_etcd_mktemp.stdout }}" -      state: absent -    changed_when: false - -- include: ../../openshift-etcd/restart.yml -  # Do not restart etcd when etcd certificates were previously expired. -  when: ('expired' not in (hostvars -                           | oo_select_keys(groups['etcd']) -                           | oo_collect('check_results.check_results.etcd') -                           | oo_collect('health'))) -  # Update master config when ca-bundle not referenced. Services will be  # restarted below after new CA certificate has been distributed.  
- name: Ensure ca-bundle.crt is referenced in master configuration diff --git a/playbooks/common/openshift-cluster/service_catalog.yml b/playbooks/common/openshift-cluster/service_catalog.yml new file mode 100644 index 000000000..c42e8781a --- /dev/null +++ b/playbooks/common/openshift-cluster/service_catalog.yml @@ -0,0 +1,8 @@ +--- +- include: evaluate_groups.yml + +- name: Service Catalog +  hosts: oo_first_master +  roles: +  - openshift_service_catalog +  - ansible_service_broker diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml index b7fd2c0c5..616ba04f8 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/backup.yml @@ -3,12 +3,12 @@    hosts: oo_etcd_hosts_to_backup    roles:    - role: openshift_facts -  - role: etcd_upgrade -    r_etcd_upgrade_action: backup -    r_etcd_backup_tag: etcd_backup_tag +  - role: etcd_common +    r_etcd_common_action: backup +    r_etcd_common_backup_tag: etcd_backup_tag      r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}" -    r_etcd_upgrade_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" -    r_etcd_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}" +    r_etcd_common_embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}" +    r_etcd_common_backup_sufix_name: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"  - name: Gate on etcd backup    hosts: localhost @@ -18,7 +18,7 @@    - set_fact:        etcd_backup_completed: "{{ hostvars                                   | oo_select_keys(groups.oo_etcd_hosts_to_backup) -                                 | oo_collect('inventory_hostname', {'r_etcd_upgrade_backup_complete': true}) }}" +                                 | oo_collect('inventory_hostname', {'r_etcd_common_backup_complete': true}) }}"    - set_fact:        etcd_backup_failed: "{{ groups.oo_etcd_hosts_to_backup | difference(etcd_backup_completed) }}"    - fail: diff --git a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml index 3e01883ae..64abc54e7 100644 --- a/playbooks/common/openshift-cluster/upgrades/etcd/main.yml +++ b/playbooks/common/openshift-cluster/upgrades/etcd/main.yml @@ -16,7 +16,8 @@    tasks:    - include_role:        name: etcd_common -      tasks_from: etcdctl.yml +    vars: +      r_etcd_common_action: drop_etcdctl  - name: Perform etcd upgrade    include: ./upgrade.yml diff --git a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml index 046535680..72de63070 100644 --- a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml @@ -6,27 +6,32 @@    - lib_openshift    tasks: -  - name: Retrieve list of openshift nodes matching upgrade label -    oc_obj: -      state: list -      kind: node -      selector: "{{ openshift_upgrade_nodes_label }}" -    register: nodes_to_upgrade -    when: openshift_upgrade_nodes_label is defined +  - when: openshift_upgrade_nodes_label is defined +    block: +    - name: Retrieve list of openshift nodes matching upgrade label +      oc_obj: +        state: list +        kind: node +        selector: "{{ openshift_upgrade_nodes_label }}" +      register: nodes_to_upgrade -  # We got a list of nodes with 
the label, now we need to match these with inventory hosts -  # using their openshift.common.hostname fact. -  - name: Map labelled nodes to inventory hosts -    add_host: -      name: "{{ item }}" -      groups: temp_nodes_to_upgrade -      ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" -      ansible_become: "{{ g_sudo | default(omit) }}" -    with_items: " {{ groups['oo_nodes_to_config'] }}" -    when: -    - openshift_upgrade_nodes_label is defined -    - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list -    changed_when: false +    - name: Fail if no nodes match openshift_upgrade_nodes_label +      fail: +        msg: "openshift_upgrade_nodes_label was specified but no nodes matched" +      when: nodes_to_upgrade.results.results[0]['items'] | length == 0 + +    # We got a list of nodes with the label, now we need to match these with inventory hosts +    # using their openshift.common.hostname fact. +    - name: Map labelled nodes to inventory hosts +      add_host: +        name: "{{ item }}" +        groups: temp_nodes_to_upgrade +        ansible_ssh_user: "{{ g_ssh_user | default(omit) }}" +        ansible_become: "{{ g_sudo | default(omit) }}" +      with_items: " {{ groups['oo_nodes_to_config'] }}" +      when: +      - hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list +      changed_when: false    # Build up the oo_nodes_to_upgrade group, use the list filtered by label if    # present, otherwise hit all nodes: diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml index b980909eb..6738ce11f 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml @@ -3,6 +3,16 @@  # Upgrade Masters  ############################################################################### +# oc adm migrate storage should be run prior to etcd v3 upgrade +# See: https://github.com/openshift/origin/pull/14625#issuecomment-308467060 +- name: Pre master upgrade - Upgrade job storage +  hosts: oo_first_master +  tasks: +  - name: Upgrade job storage +    command: > +      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig +      migrate storage --include=jobs --confirm +  # If facts cache were for some reason deleted, this fact may not be set, and if not set  # it will always default to true. This causes problems for the etcd data dir fact detection  # so we must first make sure this is set correctly before attempting the backup. 
@@ -133,6 +143,14 @@    - set_fact:        master_update_complete: True +- name: Post master upgrade - Upgrade job storage +  hosts: oo_first_master +  tasks: +  - name: Upgrade job storage +    command: > +      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig +      migrate storage --include=jobs --confirm +  ##############################################################################  # Gate on master update complete  ############################################################################## @@ -278,6 +296,7 @@    - openshift_facts    - docker    - openshift_node_upgrade +  - openshift_node_dnsmasq    post_tasks:    - name: Set node schedulability diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml index 91dbc2cd4..35a50cf4e 100644 --- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml +++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml @@ -34,6 +34,7 @@    - openshift_facts    - docker    - openshift_node_upgrade +  - openshift_node_dnsmasq    - role: openshift_excluder      r_openshift_excluder_action: enable      r_openshift_excluder_service_type: "{{ openshift.common.service_type }}" diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml new file mode 100644 index 000000000..ed89dbe8d --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml @@ -0,0 +1,16 @@ +--- +- modify_yaml: +    dest: "{{ openshift.common.config_base}}/master/master-config.yaml" +    yaml_key: 'admissionConfig.pluginConfig' +    yaml_value: "{{ openshift.master.admission_plugin_config }}" +  when: "'admission_plugin_config' in openshift.master" + +- modify_yaml: +    dest: "{{ openshift.common.config_base}}/master/master-config.yaml" +    yaml_key: 'admissionConfig.pluginOrderOverride' +    yaml_value: + +- modify_yaml: +    dest: "{{ openshift.common.config_base}}/master/master-config.yaml" +    yaml_key: 'kubernetesMasterConfig.admissionConfig' +    yaml_value: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml deleted file mode 100644 index 48c69eccd..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/storage_upgrade.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -############################################################################### -# Post upgrade - Upgrade job storage -############################################################################### -- name: Upgrade job storage -  hosts: oo_first_master -  roles: -  - { role: openshift_cli } -  vars: -    # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe -    # restart. 
-    skip_docker_role: True -  tasks: -  - name: Upgrade job storage -    command: > -      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig -      migrate storage --include=jobs --confirm -    run_once: true diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml index e63b03e51..4e7c14e94 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml @@ -115,5 +115,3 @@  - include: ../upgrade_nodes.yml  - include: ../post_control_plane.yml - -- include: storage_upgrade.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml index 21e1d440d..45b664d06 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml @@ -115,7 +115,7 @@    - include: ../cleanup_unused_images.yml  - include: ../upgrade_control_plane.yml +  vars: +    master_config_hook: "v3_5/master_config_upgrade.yml"  - include: ../post_control_plane.yml - -- include: storage_upgrade.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml new file mode 100644 index 000000000..ed89dbe8d --- /dev/null +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml @@ -0,0 +1,16 @@ +--- +- modify_yaml: +    dest: "{{ openshift.common.config_base}}/master/master-config.yaml" +    yaml_key: 'admissionConfig.pluginConfig' +    yaml_value: "{{ openshift.master.admission_plugin_config }}" +  when: "'admission_plugin_config' in openshift.master" + +- modify_yaml: +    dest: "{{ openshift.common.config_base}}/master/master-config.yaml" +    yaml_key: 'admissionConfig.pluginOrderOverride' +    yaml_value: + +- modify_yaml: +    dest: "{{ openshift.common.config_base}}/master/master-config.yaml" +    yaml_key: 'kubernetesMasterConfig.admissionConfig' +    yaml_value: diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml deleted file mode 100644 index 48c69eccd..000000000 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/storage_upgrade.yml +++ /dev/null @@ -1,18 +0,0 @@ ---- -############################################################################### -# Post upgrade - Upgrade job storage -############################################################################### -- name: Upgrade job storage -  hosts: oo_first_master -  roles: -  - { role: openshift_cli } -  vars: -    # Another spot where we assume docker is running and do not want to accidentally trigger an unsafe -    # restart. 
-    skip_docker_role: True -  tasks: -  - name: Upgrade job storage -    command: > -      {{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig -      migrate storage --include=jobs --confirm -    run_once: true diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml index 5d41b84d0..5b9ac9e8f 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml @@ -115,5 +115,3 @@  - include: ../upgrade_nodes.yml  - include: ../post_control_plane.yml - -- include: storage_upgrade.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml index e34259b00..a470c7595 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml @@ -115,7 +115,7 @@    - include: ../cleanup_unused_images.yml  - include: ../upgrade_control_plane.yml +  vars: +    master_config_hook: "v3_6/master_config_upgrade.yml"  - include: ../post_control_plane.yml - -- include: storage_upgrade.yml diff --git a/playbooks/common/openshift-etcd/restart.yml b/playbooks/common/openshift-etcd/restart.yml index 196c86f28..af1ef245a 100644 --- a/playbooks/common/openshift-etcd/restart.yml +++ b/playbooks/common/openshift-etcd/restart.yml @@ -5,5 +5,5 @@    tasks:      - name: restart etcd        service: -        name: "{{ 'etcd' if not openshift.common.is_containerized | bool else 'etcd_container' }}" +        name: "{{ 'etcd_container' if openshift.common.etcd_runtime == 'docker' else 'etcd' }}"          state: restarted diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml index ddc4db8f8..70108fb7a 100644 --- a/playbooks/common/openshift-master/config.yml +++ b/playbooks/common/openshift-master/config.yml @@ -20,6 +20,25 @@      - node      - .config_managed +  - name: Check for existing configuration +    stat: +      path: /etc/origin/master/master-config.yaml +    register: master_config_stat + +  - name: Set clean install fact +    set_fact: +      l_clean_install: "{{ not master_config_stat.stat.exists | bool }}" + +  - name: Determine if etcd3 storage is in use +    command: grep  -Pzo  "storage-backend:\n.*etcd3" /etc/origin/master/master-config.yaml -q +    register: etcd3_grep +    failed_when: false +    changed_when: false + +  - name: Set etcd3 fact +    set_fact: +      l_etcd3_enabled: "{{ etcd3_grep.rc == 0 | bool }}" +    - set_fact:        openshift_master_pod_eviction_timeout: "{{ lookup('oo_option', 'openshift_master_pod_eviction_timeout') | default(none, true) }}"      when: openshift_master_pod_eviction_timeout is not defined @@ -122,6 +141,8 @@      etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"      etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"      etcd_cert_prefix: "master.etcd-" +    r_openshift_master_clean_install: "{{ hostvars[groups.oo_first_master.0].l_clean_install }}" +    r_openshift_master_etcd3_storage: "{{ hostvars[groups.oo_first_master.0].l_etcd3_enabled }}"    - role: nuage_master      when: openshift.common.use_nuage | bool    - role: calico_master | 
