23 files changed, 254 insertions, 111 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index bf76a3913..1266921a6 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.9.0-0.39.0 ./
+3.9.0-0.41.0 ./
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 1ec707543..ae0104b27 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -10,7 +10,7 @@
 Name:           openshift-ansible
 Version:        3.9.0
-Release:        0.39.0%{?dist}
+Release:        0.41.0%{?dist}
 Summary:        Openshift and Atomic Enterprise Ansible
 License:        ASL 2.0
 URL:            https://github.com/openshift/openshift-ansible
@@ -201,6 +201,20 @@ Atomic OpenShift Utilities includes
 %changelog
+* Wed Feb 07 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.41.0
+- Allow OVS 2.7 in OCP 3.10 (sdodson@redhat.com)
+- GlusterFS: Minor documentation update (jarrpa@redhat.com)
+- Make sure to include upgrade_pre when upgrading master nodes
+  (sdodson@redhat.com)
+
+* Wed Feb 07 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.40.0
+- health checks: tolerate ovs 2.9 (lmeyer@redhat.com)
+- Fix docker rpm upgrade install task wording (mgugino@redhat.com)
+- Initial support for 3.10 (sdodson@redhat.com)
+- add deprovisioning for ELB (and IAM certs) (jdiaz@redhat.com)
+- [6632] fix indentation of terminationGracePeriodSeconds var
+  (jsanda@redhat.com)
+
 * Tue Feb 06 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.39.0
 - Update code to not fail when rc != 0 (kwoodson@redhat.com)
 - Upgrades: pass openshift_manage_node_is_master to master nodes during upgrade
diff --git a/playbooks/aws/openshift-cluster/uninstall_elb.yml b/playbooks/aws/openshift-cluster/uninstall_elb.yml
new file mode 100644
index 000000000..c1b724f0c
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/uninstall_elb.yml
@@ -0,0 +1,9 @@
+---
+- name: Delete elb
+  hosts: localhost
+  connection: local
+  tasks:
+  - name: deprovision elb
+    include_role:
+      name: openshift_aws
+      tasks_from: uninstall_elb.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 40e245d75..3c0b72832 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -311,6 +311,9 @@
   post_tasks:
   - import_role:
       name: openshift_node
+      tasks_from: upgrade_pre.yml
+  - import_role:
+      name: openshift_node
       tasks_from: upgrade.yml
   - import_role:
       name: openshift_manage_node
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
index ac1b633b7..9c7677f1b 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml
@@ -126,8 +126,8 @@
   - name: Restart master controllers to force new leader election mode
     service:
       name: "{{ openshift_service_type }}-master-controllers"
-      state: restart
-    when: openshift.common.rolling_restart_mode == 'service'
+      state: restarted
+    when: openshift.common.rolling_restart_mode == 'services'
   - name: Re-enable master controllers to force new leader election mode
     service:
       name: "{{ openshift_service_type }}-master-controllers"
diff --git
a/playbooks/openshift-prometheus/private/uninstall.yml b/playbooks/openshift-prometheus/private/uninstall.yml index 2df39c2a8..b01f7f988 100644 --- a/playbooks/openshift-prometheus/private/uninstall.yml +++ b/playbooks/openshift-prometheus/private/uninstall.yml @@ -5,4 +5,4 @@    - name: Run the Prometheus Uninstall Role Tasks      include_role:        name: openshift_prometheus -      tasks_from: uninstall +      tasks_from: uninstall_prometheus diff --git a/roles/openshift_aws/tasks/elb.yml b/roles/openshift_aws/tasks/elb.yml index d8257cf31..3eb7b73b3 100644 --- a/roles/openshift_aws/tasks/elb.yml +++ b/roles/openshift_aws/tasks/elb.yml @@ -2,26 +2,8 @@  - name: "dump the elb listeners for {{ l_elb_dict_item.key }}"    debug:      msg: "{{ l_elb_dict_item.value }}" +    verbosity: 1 -- name: "Create ELB {{ l_elb_dict_item.key }}" -  ec2_elb_lb: -    name: "{{ item.value.name }}" -    state: present -    cross_az_load_balancing: "{{ item.value.cross_az_load_balancing }}" -    security_group_names: "{{ l_elb_security_groups[l_elb_dict_item.key] }}" -    idle_timeout: "{{ item.value.idle_timout }}" -    region: "{{ openshift_aws_region }}" -    subnets: -    - "{{ subnetout.subnets[0].id }}" -    health_check: "{{ item.value.health_check }}" -    listeners: "{{ item.value.listeners }}" -    scheme: "{{ (item.key == 'internal') | ternary('internal','internet-facing') }}" -    tags: "{{ item.value.tags }}" -    wait: True -  register: new_elb +- name: Create ELB(s) +  include_tasks: elb_single.yml    with_dict: "{{ l_elb_dict_item.value }}" - -- debug: -    msg: "{{ item }}" -  with_items: -  - "{{ new_elb }}" diff --git a/roles/openshift_aws/tasks/elb_single.yml b/roles/openshift_aws/tasks/elb_single.yml new file mode 100644 index 000000000..864757549 --- /dev/null +++ b/roles/openshift_aws/tasks/elb_single.yml @@ -0,0 +1,34 @@ +--- +- name: "dump the elb listeners for {{ item.key }}" +  debug: +    msg: "{{ item.value }}" +    verbosity: 1 + +- name: "Create ELB {{ item.value.name }}" +  ec2_elb_lb: +    name: "{{ item.value.name }}" +    state: present +    cross_az_load_balancing: "{{ item.value.cross_az_load_balancing }}" +    security_group_names: "{{ l_elb_security_groups[l_elb_dict_item.key] }}" +    idle_timeout: "{{ item.value.idle_timout }}" +    region: "{{ openshift_aws_region }}" +    subnets: +    - "{{ subnetout.subnets[0].id }}" +    health_check: "{{ item.value.health_check }}" +    listeners: "{{ item.value.listeners }}" +    scheme: "{{ (item.key == 'internal') | ternary('internal','internet-facing') }}" +    tags: "{{ item.value.tags }}" +    wait: True +  register: new_elb +  retries: 20 +  delay: 5 +  until: new_elb | succeeded +  ignore_errors: yes + +- fail: +    msg: "couldn't create ELB {{ item.value.name }}" +  when: not new_elb | succeeded + +- debug: +    msg: "{{ new_elb }}" +    verbosity: 1 diff --git a/roles/openshift_aws/tasks/iam_cert.yml b/roles/openshift_aws/tasks/iam_cert.yml index f74a62b8b..42d7d951c 100644 --- a/roles/openshift_aws/tasks/iam_cert.yml +++ b/roles/openshift_aws/tasks/iam_cert.yml @@ -18,7 +18,9 @@    - openshift_aws_iam_cert_key_path != ''    - openshift_aws_elb_cert_arn == '' -- debug: msg="{{ elb_cert_chain }}" +- debug: +    msg: "{{ elb_cert_chain }}" +    verbosity: 1  - name: set_fact openshift_aws_elb_cert_arn    set_fact: @@ -28,8 +30,3 @@    - openshift_aws_iam_cert_path != ''    - openshift_aws_iam_cert_key_path != ''    - openshift_aws_elb_cert_arn == '' - -- name: wait for cert to propagate -  pause: -    seconds: 5 -  
when: elb_cert_chain.changed diff --git a/roles/openshift_aws/tasks/uninstall_elb.yml b/roles/openshift_aws/tasks/uninstall_elb.yml new file mode 100644 index 000000000..147e9a905 --- /dev/null +++ b/roles/openshift_aws/tasks/uninstall_elb.yml @@ -0,0 +1,11 @@ +--- +- name: delete elbs +  ec2_elb_lb: +    name: "{{ item }}" +    region: "{{ openshift_aws_region }}" +    state: absent +  with_items: "{{ openshift_aws_elb_dict | json_query('*.*.name') | sum(start = []) }}" + +- when: openshift_aws_create_iam_cert | bool +  name: delete the iam_cert for elb certificate +  include_tasks: uninstall_iam_cert.yml diff --git a/roles/openshift_aws/tasks/uninstall_iam_cert.yml b/roles/openshift_aws/tasks/uninstall_iam_cert.yml new file mode 100644 index 000000000..7b47673ee --- /dev/null +++ b/roles/openshift_aws/tasks/uninstall_iam_cert.yml @@ -0,0 +1,25 @@ +--- +- when: +  - openshift_aws_create_iam_cert | bool +  - openshift_aws_iam_cert_path != '' +  - openshift_aws_iam_cert_key_path != '' +  - openshift_aws_elb_cert_arn == '' +  block: +  - name: delete AWS IAM certificates +    iam_cert23: +      state: absent +      name: "{{ openshift_aws_iam_cert_name }}" +    register: elb_cert_chain +    retries: 20 +    delay: 10 +    until: elb_cert_chain | succeeded +    ignore_errors: yes + +  - debug: +      var: elb_cert_chain +      verbosity: 1 + +  - name: check for iam cert error +    fail: +      msg: "Couldn't delete IAM cert {{ openshift_aws_iam_cert_name }}" +    when: not elb_cert_chain | succeeded diff --git a/roles/openshift_aws/tasks/vpc_and_subnet_id.yml b/roles/openshift_aws/tasks/vpc_and_subnet_id.yml index 1b754f863..c2c345faf 100644 --- a/roles/openshift_aws/tasks/vpc_and_subnet_id.yml +++ b/roles/openshift_aws/tasks/vpc_and_subnet_id.yml @@ -7,7 +7,9 @@    register: vpcout  - name: debug vcpout -  debug: var=vpcout +  debug: +    var: vpcout +    verbosity: 1  - name: fetch the default subnet id    ec2_vpc_subnet_facts: @@ -18,4 +20,6 @@    register: subnetout  - name: debug subnetout -  debug: var=subnetout +  debug: +    var: subnetout +    verbosity: 1 diff --git a/roles/openshift_health_checker/openshift_checks/ovs_version.py b/roles/openshift_health_checker/openshift_checks/ovs_version.py index 4352778c2..fa398e5a9 100644 --- a/roles/openshift_health_checker/openshift_checks/ovs_version.py +++ b/roles/openshift_health_checker/openshift_checks/ovs_version.py @@ -22,7 +22,7 @@ class OvsVersion(NotContainerizedMixin, OpenShiftCheck):          (3, 7): ["2.6", "2.7", "2.8", "2.9"],          (3, 8): ["2.6", "2.7", "2.8", "2.9"],          (3, 9): ["2.6", "2.7", "2.8", "2.9"], -        (3, 10): ["2.8", "2.9"], +        (3, 10): ["2.7", "2.8", "2.9"],      }      def is_active(self): diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py index 3e8c1dac3..68022deca 100644 --- a/roles/openshift_health_checker/openshift_checks/package_version.py +++ b/roles/openshift_health_checker/openshift_checks/package_version.py @@ -18,7 +18,7 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):          (3, 7): ["2.6", "2.7", "2.8", "2.9"],          (3, 8): ["2.6", "2.7", "2.8", "2.9"],          (3, 9): ["2.6", "2.7", "2.8", "2.9"], -        (3, 10): ["2.8", "2.9"], +        (3, 10): ["2.7", "2.8", "2.9"],      }      openshift_to_docker_version = { diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml index ce27e238f..a92b63979 
100644 --- a/roles/openshift_master_certificates/tasks/main.yml +++ b/roles/openshift_master_certificates/tasks/main.yml @@ -1,25 +1,16 @@  --- -- set_fact: -    openshift_master_certs_no_etcd: -    - admin.crt -    - master.kubelet-client.crt -    - master.proxy-client.crt -    - master.server.crt -    - openshift-master.crt -    - openshift-registry.crt -    - openshift-router.crt -    - etcd.server.crt -    openshift_master_certs_etcd: -    - master.etcd-client.crt - -- set_fact: -    openshift_master_certs: "{{ (openshift_master_certs_no_etcd | union(openshift_master_certs_etcd )) if openshift_master_etcd_hosts | length > 0 else openshift_master_certs_no_etcd }}" -  - name: Check status of master certificates    stat:      path: "{{ openshift_master_config_dir }}/{{ item }}"    with_items: -  - "{{ openshift_master_certs }}" +  - admin.crt +  - ca.crt +  - ca-bundle.crt +  - master.kubelet-client.crt +  - master.proxy-client.crt +  - master.server.crt +  - openshift-master.crt +  - service-signer.crt    register: g_master_cert_stat_result    when: not openshift_certificates_redeploy | default(false) | bool diff --git a/roles/openshift_prometheus/tasks/uninstall.yaml b/roles/openshift_prometheus/tasks/uninstall_prometheus.yaml index d746402db..d746402db 100644 --- a/roles/openshift_prometheus/tasks/uninstall.yaml +++ b/roles/openshift_prometheus/tasks/uninstall_prometheus.yaml diff --git a/roles/openshift_provisioners/defaults/main.yaml b/roles/openshift_provisioners/defaults/main.yaml index 34ba78404..a54bf6de2 100644 --- a/roles/openshift_provisioners/defaults/main.yaml +++ b/roles/openshift_provisioners/defaults/main.yaml @@ -11,7 +11,7 @@ openshift_provisioners_project: openshift-infra  openshift_provisioners_image_prefix_dict:    origin: "docker.io/openshift/origin-" -  openshift-enterprise: "registry.access.redhat.com/openshift3/ose-" +  openshift-enterprise: "registry.access.redhat.com/openshift3/"  openshift_provisioners_image_version_dict:    origin: "latest" diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md index 70a89b0ba..65d38793c 100644 --- a/roles/openshift_storage_glusterfs/README.md +++ b/roles/openshift_storage_glusterfs/README.md @@ -81,8 +81,8 @@ GlusterFS cluster into a new or existing OpenShift cluster:  | openshift_storage_glusterfs_name                       | 'storage'               | A name to identify the GlusterFS cluster, which will be used in resource names  | openshift_storage_glusterfs_nodeselector               | 'glusterfs=storage-host'| Selector to determine which nodes will host GlusterFS pods in native mode. **NOTE:** The label value is taken from the cluster name  | openshift_storage_glusterfs_use_default_selector       | False                   | Whether to use a default node selector for the GlusterFS namespace/project. If False, the namespace/project will have no restricting node selector. If True, uses pre-existing or default (e.g. osm_default_node_selector) node selectors. **NOTE:** If True, nodes which will host GlusterFS pods must already have the additional labels. 
-| openshift_storage_glusterfs_storageclass               | True                    | Automatically create a StorageClass for each GlusterFS cluster -| openshift_storage_glusterfs_storageclass_default       | False                   | Sets the StorageClass for each GlusterFS cluster as default +| openshift_storage_glusterfs_storageclass               | True                    | Automatically create a GlusterFS StorageClass for this group +| openshift_storage_glusterfs_storageclass_default       | False                   | Sets the GlusterFS StorageClass for this group as cluster-wide default  | openshift_storage_glusterfs_image                      | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7'  | openshift_storage_glusterfs_version                    | 'latest'                | Container image version to use for GlusterFS pods  | openshift_storage_glusterfs_block_deploy               | True                    | Deploy glusterblock provisioner service @@ -91,8 +91,8 @@ GlusterFS cluster into a new or existing OpenShift cluster:  | openshift_storage_glusterfs_block_host_vol_create      | True                    | Automatically create GlusterFS volumes to host glusterblock volumes. **NOTE:** If this is False, block-hosting volumes will need to be manually created before glusterblock volumes can be provisioned  | openshift_storage_glusterfs_block_host_vol_size        | 100                     | Size, in GB, of GlusterFS volumes that will be automatically create to host glusterblock volumes if not enough space is available for a glusterblock volume create request. **NOTE:** This value is effectively an upper limit on the size of glusterblock volumes unless you manually create larger GlusterFS block-hosting volumes  | openshift_storage_glusterfs_block_host_vol_max         | 15                      | Max number of GlusterFS volumes to host glusterblock volumes -| openshift_storage_glusterfs_block_storageclass         | False                   | Automatically create a StorageClass for each Gluster Block cluster -| openshift_storage_glusterfs_block_storageclass_default | False                   | Sets the StorageClass for each Gluster Block cluster as default +| openshift_storage_glusterfs_block_storageclass         | False                   | Automatically create a StorageClass for each glusterblock cluster +| openshift_storage_glusterfs_block_storageclass_default | False                   | Sets the glusterblock StorageClass for this group as cluster-wide default  | openshift_storage_glusterfs_s3_deploy                  | True                    | Deploy gluster-s3 service  | openshift_storage_glusterfs_s3_image                   | 'gluster/gluster-object'| Container image to use for gluster-s3 pod, enterprise default is 'rhgs3/rhgs-gluster-s3-server-rhel7'  | openshift_storage_glusterfs_s3_version                 | 'latest'                | Container image version to use for gluster=s3 pod @@ -118,8 +118,8 @@ GlusterFS cluster into a new or existing OpenShift cluster:  | openshift_storage_glusterfs_heketi_ssh_user            | 'root'                  | SSH user for external GlusterFS nodes via native heketi  | openshift_storage_glusterfs_heketi_ssh_sudo            | False                   | Whether to sudo (if non-root user) for SSH to external GlusterFS nodes via native heketi  | openshift_storage_glusterfs_heketi_ssh_keyfile         | Undefined               | Path to a private key file for use with SSH connections to 
external GlusterFS nodes via native heketi **NOTE:** This must be an absolute path -| openshift_storage_glusterfs_heketi_fstab         | '/var/lib/heketi/fstab' | When heketi is native, sets the path to the fstab file on the GlusterFS nodes to update on LVM volume mounts, changes to '/etc/fstab/' when the heketi executor is 'ssh' **NOTE:** This should not need to be changed -| openshift_storage_glusterfs_heketi_wipe          | False                   | Destroy any existing heketi resources, defaults to the value of `openshift_storage_glusterfs_wipe` +| openshift_storage_glusterfs_heketi_fstab               | '/var/lib/heketi/fstab' | When heketi is native, sets the path to the fstab file on the GlusterFS nodes to update on LVM volume mounts, changes to '/etc/fstab/' when the heketi executor is 'ssh' **NOTE:** This should not need to be changed +| openshift_storage_glusterfs_heketi_wipe                | False                   | Destroy any existing heketi resources, defaults to the value of `openshift_storage_glusterfs_wipe`  Each role variable also has a corresponding variable to optionally configure a  separate GlusterFS cluster for use as storage for an integrated Docker @@ -131,11 +131,11 @@ are an exception:  | Name                                                            | Default value         | Description                             |  |-----------------------------------------------------------------|-----------------------|-----------------------------------------|  | openshift_storage_glusterfs_registry_namespace                  | registry namespace    | Default is to use the hosted registry's namespace, otherwise 'glusterfs' -| openshift_storage_glusterfs_registry_name                       | 'registry'            | This allows for the logical separation of the registry GlusterFS cluster from other GlusterFS clusters -| openshift_storage_glusterfs_registry_storageclass               | False                 | It is recommended to not create a StorageClass for GlusterFS clusters serving registry storage, so as to avoid performance penalties -| openshift_storage_glusterfs_registry_storageclass_default       | False                 | Sets the StorageClass for each GlusterFS cluster as default -| openshift_storage_glusterfs_registry_block_storageclass         | False                 | It is recommended to not create a StorageClass for Gluster Block clusters serving registry storage, so as to avoid performance penalties -| openshift_storage_glusterfs_registry_block_storageclass_default | False                 | Sets the StorageClass for each Gluster Block cluster as default +| openshift_storage_glusterfs_registry_name                       | 'registry'            | This allows for the logical separation of the registry group from other Gluster groups +| openshift_storage_glusterfs_registry_storageclass               | False                 | It is recommended to not create a StorageClass for this group, so as to avoid noisy neighbor complications +| openshift_storage_glusterfs_registry_storageclass_default       | False                 | Separate from the above +| openshift_storage_glusterfs_registry_block_storageclass         | False                 | Only enable this for use by Logging and Metrics +| openshift_storage_glusterfs_registry_block_storageclass_default | False                 | Separate from the above  | openshift_storage_glusterfs_registry_heketi_admin_key           | auto-generated        | Separate from the above  | 
openshift_storage_glusterfs_registry_heketi_user_key            | auto-generated        | Separate from the above diff --git a/roles/openshift_web_console/files/console-template.yaml b/roles/openshift_web_console/files/console-template.yaml index 547e7a265..5bcfcf73f 100644 --- a/roles/openshift_web_console/files/console-template.yaml +++ b/roles/openshift_web_console/files/console-template.yaml @@ -67,10 +67,17 @@ objects:                port: 8443                scheme: HTTPS            livenessProbe: -            httpGet: -              path: / -              port: 8443 -              scheme: HTTPS +            exec: +              command: +                - /bin/sh +                - -i +                - -c +                - |- +                  if [[ ! -f /tmp/webconsole-config.hash ]]; then \ +                    md5sum /var/webconsole-config/webconsole-config.yaml > /tmp/webconsole-config.hash; \ +                  elif [[ $(md5sum /var/webconsole-config/webconsole-config.yaml) != $(cat /tmp/webconsole-config.hash) ]]; then \ +                    exit 1; \ +                  fi && curl -k -f https://0.0.0.0:8443/console/            resources:              requests:                cpu: 100m diff --git a/roles/openshift_web_console/tasks/update_console_config.yml b/roles/openshift_web_console/tasks/update_console_config.yml index 967222ea4..8b967cda3 100644 --- a/roles/openshift_web_console/tasks/update_console_config.yml +++ b/roles/openshift_web_console/tasks/update_console_config.yml @@ -5,9 +5,6 @@  # `value` properties in the same format as `yedit` module `edits`. Only  # properties passed are updated. The separator for nested properties is `#`.  # -# Note that this triggers a redeployment on the console and a brief downtime -# since it uses a `Recreate` strategy. -#  # Example usage:  #  # - include_role: @@ -55,13 +52,9 @@        state: present        from_file:          webconsole-config.yaml: "{{ mktemp_console.stdout }}/webconsole-config.yaml" -    register: update_console_config_map    - name: Remove temp directory      file:        state: absent        name: "{{ mktemp_console.stdout }}"      changed_when: False - -  - include_tasks: rollout_console.yml -    when: update_console_config_map.changed | bool diff --git a/utils/src/ooinstall/cli_installer.py b/utils/src/ooinstall/cli_installer.py index eb42721b5..ffab3bfbf 100644 --- a/utils/src/ooinstall/cli_installer.py +++ b/utils/src/ooinstall/cli_installer.py @@ -796,6 +796,17 @@ If changes are needed please edit the installer.cfg.yml config file above and re      if not unattended:          confirm_continue(message) +    error = openshift_ansible.run_prerequisites(inventory_file, oo_cfg.deployment.hosts, +                                                hosts_to_run_on, verbose) +    if error: +        # The bootstrap script will print out the log location. +        message = """ +An error was detected. After resolving the problem please relaunch the +installation process. 
+""" +        click.echo(message) +        sys.exit(1) +      error = openshift_ansible.run_main_playbook(inventory_file, oo_cfg.deployment.hosts,                                                  hosts_to_run_on, verbose) diff --git a/utils/src/ooinstall/openshift_ansible.py b/utils/src/ooinstall/openshift_ansible.py index 84a76fa53..5e1ad09d5 100644 --- a/utils/src/ooinstall/openshift_ansible.py +++ b/utils/src/ooinstall/openshift_ansible.py @@ -275,6 +275,21 @@ def default_facts(hosts, verbose=False):      return load_system_facts(inventory_file, os_facts_path, facts_env, verbose) +def run_prerequisites(inventory_file, hosts, hosts_to_run_on, verbose=False): +    global CFG +    prerequisites_playbook_path = os.path.join(CFG.ansible_playbook_directory, +                                               'playbooks/prerequisites.yml') +    facts_env = os.environ.copy() +    if 'ansible_log_path' in CFG.settings: +        facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path'] + +    # override the ansible config for prerequisites playbook run +    if 'ansible_quiet_config' in CFG.settings: +        facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_quiet_config'] + +    return run_ansible(prerequisites_playbook_path, inventory_file, facts_env, verbose) + +  def run_main_playbook(inventory_file, hosts, hosts_to_run_on, verbose=False):      global CFG      if len(hosts_to_run_on) != len(hosts): @@ -282,7 +297,7 @@ def run_main_playbook(inventory_file, hosts, hosts_to_run_on, verbose=False):                                            'playbooks/openshift-node/scaleup.yml')      else:          main_playbook_path = os.path.join(CFG.ansible_playbook_directory, -                                          'playbooks/byo/openshift-cluster/config.yml') +                                          'playbooks/deploy_cluster.yml')      facts_env = os.environ.copy()      if 'ansible_log_path' in CFG.settings:          facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path'] diff --git a/utils/test/cli_installer_tests.py b/utils/test/cli_installer_tests.py index 2259f3416..e5e66c1ee 100644 --- a/utils/test/cli_installer_tests.py +++ b/utils/test/cli_installer_tests.py @@ -393,14 +393,16 @@ class UnattendedCliTests(OOCliFixture):      # unattended with config file and all installed hosts (without --force)      @patch('ooinstall.openshift_ansible.run_main_playbook') +    @patch('ooinstall.openshift_ansible.run_prerequisites')      @patch('ooinstall.openshift_ansible.load_system_facts') -    def test_get_hosts_to_run_on1(self, load_facts_mock, run_playbook_mock): +    def test_get_hosts_to_run_on1(self, load_facts_mock, prerequisites_mock, run_playbook_mock):          mock_facts = copy.deepcopy(MOCK_FACTS)          mock_facts['10.0.0.1']['common']['version'] = "3.0.0"          mock_facts['10.0.0.2']['common']['version'] = "3.0.0"          mock_facts['10.0.0.3']['common']['version'] = "3.0.0"          load_facts_mock.return_value = (mock_facts, 0) +        prerequisites_mock.return_value = 0          run_playbook_mock.return_value = 0          config_file = self.write_config( @@ -415,12 +417,15 @@ class UnattendedCliTests(OOCliFixture):      # unattended with config file and all installed hosts (with --force)      @patch('ooinstall.openshift_ansible.run_main_playbook') +    @patch('ooinstall.openshift_ansible.run_prerequisites')      @patch('ooinstall.openshift_ansible.load_system_facts') -    def test_get_hosts_to_run_on2(self, load_facts_mock, run_playbook_mock): +    def 
test_get_hosts_to_run_on2(self, load_facts_mock, prerequisites_mock, run_playbook_mock):          mock_facts = copy.deepcopy(MOCK_FACTS)          mock_facts['10.0.0.1']['common']['version'] = "3.0.0"          mock_facts['10.0.0.2']['common']['version'] = "3.0.0"          mock_facts['10.0.0.3']['common']['version'] = "3.0.0" +        prerequisites_mock.return_value = 0 +          self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock, run_playbook_mock,                                           cli_input=None,                                           exp_hosts_len=3, @@ -429,9 +434,11 @@ class UnattendedCliTests(OOCliFixture):      # unattended with config file and no installed hosts (without --force)      @patch('ooinstall.openshift_ansible.run_main_playbook') +    @patch('ooinstall.openshift_ansible.run_prerequisites')      @patch('ooinstall.openshift_ansible.load_system_facts') -    def test_get_hosts_to_run_on3(self, load_facts_mock, run_playbook_mock): +    def test_get_hosts_to_run_on3(self, load_facts_mock, prerequisites_mock, run_playbook_mock):          load_facts_mock.return_value = (MOCK_FACTS, 0) +        prerequisites_mock.return_value = 0          run_playbook_mock.return_value = 0          self._verify_get_hosts_to_run_on(MOCK_FACTS, load_facts_mock, run_playbook_mock,                                           cli_input=None, @@ -441,9 +448,11 @@ class UnattendedCliTests(OOCliFixture):      # unattended with config file and no installed hosts (with --force)      @patch('ooinstall.openshift_ansible.run_main_playbook') +    @patch('ooinstall.openshift_ansible.run_prerequisites')      @patch('ooinstall.openshift_ansible.load_system_facts') -    def test_get_hosts_to_run_on4(self, load_facts_mock, run_playbook_mock): +    def test_get_hosts_to_run_on4(self, load_facts_mock, prerequisites_mock, run_playbook_mock):          load_facts_mock.return_value = (MOCK_FACTS, 0) +        prerequisites_mock.return_value = 0          run_playbook_mock.return_value = 0          self._verify_get_hosts_to_run_on(MOCK_FACTS, load_facts_mock, run_playbook_mock,                                           cli_input=None, @@ -453,8 +462,9 @@ class UnattendedCliTests(OOCliFixture):      # unattended with config file and some installed some uninstalled hosts (without --force)      @patch('ooinstall.openshift_ansible.run_main_playbook') +    @patch('ooinstall.openshift_ansible.run_prerequisites')      @patch('ooinstall.openshift_ansible.load_system_facts') -    def test_get_hosts_to_run_on5(self, load_facts_mock, run_playbook_mock): +    def test_get_hosts_to_run_on5(self, load_facts_mock, prerequisites_mock, run_playbook_mock):          mock_facts = copy.deepcopy(MOCK_FACTS)          mock_facts['10.0.0.1']['common']['version'] = "3.0.0"          mock_facts['10.0.0.2']['common']['version'] = "3.0.0" @@ -465,22 +475,24 @@ class UnattendedCliTests(OOCliFixture):                                           force=False)      # unattended with config file and some installed some uninstalled hosts (with --force) -    @patch('ooinstall.openshift_ansible.run_main_playbook') -    @patch('ooinstall.openshift_ansible.load_system_facts') -    def test_get_hosts_to_run_on6(self, load_facts_mock, run_playbook_mock): -        mock_facts = copy.deepcopy(MOCK_FACTS) -        mock_facts['10.0.0.1']['common']['version'] = "3.0.0" -        mock_facts['10.0.0.2']['common']['version'] = "3.0.0" -        self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock, run_playbook_mock, -                                      
   cli_input=None, -                                         exp_hosts_len=3, -                                         exp_hosts_to_run_on_len=3, -                                         force=True) +    # @patch('ooinstall.openshift_ansible.run_main_playbook') +    # @patch('ooinstall.openshift_ansible.load_system_facts') +    # def test_get_hosts_to_run_on6(self, load_facts_mock, run_playbook_mock): +    #     mock_facts = copy.deepcopy(MOCK_FACTS) +    #     mock_facts['10.0.0.1']['common']['version'] = "3.0.0" +    #     mock_facts['10.0.0.2']['common']['version'] = "3.0.0" +    #     self._verify_get_hosts_to_run_on(mock_facts, load_facts_mock, run_playbook_mock, +    #                                      cli_input=None, +    #                                      exp_hosts_len=3, +    #                                      exp_hosts_to_run_on_len=3, +    #                                      force=True)      @patch('ooinstall.openshift_ansible.run_main_playbook') +    @patch('ooinstall.openshift_ansible.run_prerequisites')      @patch('ooinstall.openshift_ansible.load_system_facts') -    def test_cfg_full_run(self, load_facts_mock, run_playbook_mock): +    def test_cfg_full_run(self, load_facts_mock, prerequisites_mock, run_playbook_mock):          load_facts_mock.return_value = (MOCK_FACTS, 0) +        prerequisites_mock.return_value = 0          run_playbook_mock.return_value = 0          config_file = self.write_config( @@ -514,10 +526,12 @@ class UnattendedCliTests(OOCliFixture):          self.assertEquals(3, len(hosts_to_run_on))      @patch('ooinstall.openshift_ansible.run_main_playbook') +    @patch('ooinstall.openshift_ansible.run_prerequisites')      @patch('ooinstall.openshift_ansible.load_system_facts') -    def test_inventory_write(self, load_facts_mock, run_playbook_mock): +    def test_inventory_write(self, load_facts_mock, prerequisites_mock, run_playbook_mock):          merged_config = SAMPLE_CONFIG % 'openshift-enterprise'          load_facts_mock.return_value = (MOCK_FACTS, 0) +        prerequisites_mock.return_value = 0          run_playbook_mock.return_value = 0          config_file = self.write_config( @@ -551,9 +565,11 @@ class UnattendedCliTests(OOCliFixture):              self.assertTrue('openshift_public_hostname' in master_line)      @patch('ooinstall.openshift_ansible.run_main_playbook') +    @patch('ooinstall.openshift_ansible.run_prerequisites')      @patch('ooinstall.openshift_ansible.load_system_facts') -    def test_variant_version_latest_assumed(self, load_facts_mock, run_playbook_mock): +    def test_variant_version_latest_assumed(self, load_facts_mock, prerequisites_mock, run_playbook_mock):          load_facts_mock.return_value = (MOCK_FACTS, 0) +        prerequisites_mock.return_value = 0          run_playbook_mock.return_value = 0          config_file = self.write_config( @@ -578,9 +594,11 @@ class UnattendedCliTests(OOCliFixture):                            inventory.get('OSEv3:vars', 'deployment_type'))      @patch('ooinstall.openshift_ansible.run_main_playbook') +    @patch('ooinstall.openshift_ansible.run_prerequisites')      @patch('ooinstall.openshift_ansible.load_system_facts') -    def test_variant_version_preserved(self, load_facts_mock, run_playbook_mock): +    def test_variant_version_preserved(self, load_facts_mock, prerequisites_mock, run_playbook_mock):          load_facts_mock.return_value = (MOCK_FACTS, 0) +        prerequisites_mock.return_value = 0          run_playbook_mock.return_value = 0          config = SAMPLE_CONFIG % 
'openshift-enterprise' @@ -606,9 +624,11 @@ class UnattendedCliTests(OOCliFixture):      # unattended with bad config file and no installed hosts (without --force)      @patch('ooinstall.openshift_ansible.run_main_playbook') +    @patch('ooinstall.openshift_ansible.run_prerequisites')      @patch('ooinstall.openshift_ansible.load_system_facts') -    def test_bad_config(self, load_facts_mock, run_playbook_mock): +    def test_bad_config(self, load_facts_mock, prerequisites_mock, run_playbook_mock):          load_facts_mock.return_value = (MOCK_FACTS, 0) +        prerequisites_mock.return_value = 0          run_playbook_mock.return_value = 0          config_file = self.write_config( @@ -625,9 +645,11 @@ class UnattendedCliTests(OOCliFixture):      # unattended with three masters, one node, and haproxy      @patch('ooinstall.openshift_ansible.run_main_playbook') +    @patch('ooinstall.openshift_ansible.run_prerequisites')      @patch('ooinstall.openshift_ansible.load_system_facts') -    def test_quick_ha_full_run(self, load_facts_mock, run_playbook_mock): +    def test_quick_ha_full_run(self, load_facts_mock, prerequisites_mock, run_playbook_mock):          load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0) +        prerequisites_mock.return_value = 0          run_playbook_mock.return_value = 0          config_file = self.write_config( @@ -646,9 +668,11 @@ class UnattendedCliTests(OOCliFixture):      # unattended with two masters, one node, and haproxy      @patch('ooinstall.openshift_ansible.run_main_playbook') +    @patch('ooinstall.openshift_ansible.run_prerequisites')      @patch('ooinstall.openshift_ansible.load_system_facts') -    def test_quick_ha_only_2_masters(self, load_facts_mock, run_playbook_mock): +    def test_quick_ha_only_2_masters(self, load_facts_mock, prerequisites_mock, run_playbook_mock):          load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0) +        prerequisites_mock.return_value = 0          run_playbook_mock.return_value = 0          config_file = self.write_config( @@ -664,9 +688,11 @@ class UnattendedCliTests(OOCliFixture):      # unattended with three masters, one node, but no load balancer specified:      @patch('ooinstall.openshift_ansible.run_main_playbook') +    @patch('ooinstall.openshift_ansible.run_prerequisites')      @patch('ooinstall.openshift_ansible.load_system_facts') -    def test_quick_ha_no_lb(self, load_facts_mock, run_playbook_mock): +    def test_quick_ha_no_lb(self, load_facts_mock, prerequisites_mock, run_playbook_mock):          load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0) +        prerequisites_mock.return_value = 0          run_playbook_mock.return_value = 0          config_file = self.write_config( @@ -682,9 +708,11 @@ class UnattendedCliTests(OOCliFixture):      # unattended with three masters, one node, and one of the masters reused as load balancer:      @patch('ooinstall.openshift_ansible.run_main_playbook') +    @patch('ooinstall.openshift_ansible.run_prerequisites')      @patch('ooinstall.openshift_ansible.load_system_facts') -    def test_quick_ha_reused_lb(self, load_facts_mock, run_playbook_mock): +    def test_quick_ha_reused_lb(self, load_facts_mock, prerequisites_mock, run_playbook_mock):          load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0) +        prerequisites_mock.return_value = 0          run_playbook_mock.return_value = 0          config_file = self.write_config( @@ -699,9 +727,11 @@ class UnattendedCliTests(OOCliFixture):      # unattended with preconfigured lb      
@patch('ooinstall.openshift_ansible.run_main_playbook') +    @patch('ooinstall.openshift_ansible.run_prerequisites')      @patch('ooinstall.openshift_ansible.load_system_facts') -    def test_quick_ha_preconfigured_lb(self, load_facts_mock, run_playbook_mock): +    def test_quick_ha_preconfigured_lb(self, load_facts_mock, prerequisites_mock, run_playbook_mock):          load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0) +        prerequisites_mock.return_value = 0          run_playbook_mock.return_value = 0          config_file = self.write_config( @@ -728,9 +758,11 @@ class AttendedCliTests(OOCliFixture):          self.cli_args.extend(["-c", self.config_file])      @patch('ooinstall.openshift_ansible.run_main_playbook') +    @patch('ooinstall.openshift_ansible.run_prerequisites')      @patch('ooinstall.openshift_ansible.load_system_facts') -    def test_full_run(self, load_facts_mock, run_playbook_mock): +    def test_full_run(self, load_facts_mock, prerequisites_mock, run_playbook_mock):          load_facts_mock.return_value = (MOCK_FACTS, 0) +        prerequisites_mock.return_value = 0          run_playbook_mock.return_value = 0          cli_input = build_input( @@ -764,8 +796,9 @@ class AttendedCliTests(OOCliFixture):      # interactive with config file and some installed some uninstalled hosts      @patch('ooinstall.openshift_ansible.run_main_playbook') +    @patch('ooinstall.openshift_ansible.run_prerequisites')      @patch('ooinstall.openshift_ansible.load_system_facts') -    def test_scaleup_hint(self, load_facts_mock, run_playbook_mock): +    def test_scaleup_hint(self, load_facts_mock, prerequisites_mock, run_playbook_mock):          # Modify the mock facts to return a version indicating OpenShift          # is already installed on our master, and the first node. 
@@ -774,6 +807,7 @@ class AttendedCliTests(OOCliFixture):          mock_facts['10.0.0.2']['common']['version'] = "3.0.0"          load_facts_mock.return_value = (mock_facts, 0) +        prerequisites_mock.return_value = 0          run_playbook_mock.return_value = 0          cli_input = build_input( @@ -797,9 +831,11 @@ class AttendedCliTests(OOCliFixture):          self.assert_result(result, 1)      @patch('ooinstall.openshift_ansible.run_main_playbook') +    @patch('ooinstall.openshift_ansible.run_prerequisites')      @patch('ooinstall.openshift_ansible.load_system_facts') -    def test_fresh_install_with_config(self, load_facts_mock, run_playbook_mock): +    def test_fresh_install_with_config(self, load_facts_mock, prerequisites_mock, run_playbook_mock):          load_facts_mock.return_value = (MOCK_FACTS, 0) +        prerequisites_mock.return_value = 0          run_playbook_mock.return_value = 0          config_file = self.write_config(os.path.join(self.work_dir, @@ -821,6 +857,7 @@ class AttendedCliTests(OOCliFixture):  #    #interactive with config file and all installed hosts  #    @patch('ooinstall.openshift_ansible.run_main_playbook') +#    @patch('ooinstall.openshift_ansible.run_prerequisites')  #    @patch('ooinstall.openshift_ansible.load_system_facts')  #    def test_get_hosts_to_run_on(self, load_facts_mock, run_playbook_mock):  #        mock_facts = copy.deepcopy(MOCK_FACTS) @@ -846,9 +883,11 @@ class AttendedCliTests(OOCliFixture):      # interactive multimaster: one more node than master      @patch('ooinstall.openshift_ansible.run_main_playbook') +    @patch('ooinstall.openshift_ansible.run_prerequisites')      @patch('ooinstall.openshift_ansible.load_system_facts') -    def test_ha_dedicated_node(self, load_facts_mock, run_playbook_mock): +    def test_ha_dedicated_node(self, load_facts_mock, prerequisites_mock, run_playbook_mock):          load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0) +        prerequisites_mock.return_value = 0          run_playbook_mock.return_value = 0          cli_input = build_input( @@ -889,9 +928,11 @@ class AttendedCliTests(OOCliFixture):      # interactive multimaster: identical masters and nodes      @patch('ooinstall.openshift_ansible.run_main_playbook') +    @patch('ooinstall.openshift_ansible.run_prerequisites')      @patch('ooinstall.openshift_ansible.load_system_facts') -    def test_ha_no_dedicated_nodes(self, load_facts_mock, run_playbook_mock): +    def test_ha_no_dedicated_nodes(self, load_facts_mock, prerequisites_mock, run_playbook_mock):          load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0) +        prerequisites_mock.return_value = 0          run_playbook_mock.return_value = 0          cli_input = build_input( @@ -958,9 +999,11 @@ class AttendedCliTests(OOCliFixture):      # interactive multimaster: attempting to use a master as the load balancer should fail:      @patch('ooinstall.openshift_ansible.run_main_playbook') +    @patch('ooinstall.openshift_ansible.run_prerequisites')      @patch('ooinstall.openshift_ansible.load_system_facts') -    def test_ha_reuse_master_as_lb(self, load_facts_mock, run_playbook_mock): +    def test_ha_reuse_master_as_lb(self, load_facts_mock, prerequisites_mock, run_playbook_mock):          load_facts_mock.return_value = (MOCK_FACTS_QUICKHA, 0) +        prerequisites_mock.return_value = 0          run_playbook_mock.return_value = 0          cli_input = build_input( @@ -981,9 +1024,11 @@ class AttendedCliTests(OOCliFixture):      # interactive all-in-one      
     @patch('ooinstall.openshift_ansible.run_main_playbook')
+    @patch('ooinstall.openshift_ansible.run_prerequisites')
     @patch('ooinstall.openshift_ansible.load_system_facts')
-    def test_all_in_one(self, load_facts_mock, run_playbook_mock):
+    def test_all_in_one(self, load_facts_mock, prerequisites_mock, run_playbook_mock):
         load_facts_mock.return_value = (MOCK_FACTS, 0)
+        prerequisites_mock.return_value = 0
         run_playbook_mock.return_value = 0
 
         cli_input = build_input(
@@ -1010,9 +1055,11 @@
                                        'openshift_schedulable=True')
 
     @patch('ooinstall.openshift_ansible.run_main_playbook')
+    @patch('ooinstall.openshift_ansible.run_prerequisites')
     @patch('ooinstall.openshift_ansible.load_system_facts')
-    def test_gen_inventory(self, load_facts_mock, run_playbook_mock):
+    def test_gen_inventory(self, load_facts_mock, prerequisites_mock, run_playbook_mock):
         load_facts_mock.return_value = (MOCK_FACTS, 0)
+        prerequisites_mock.return_value = 0
         run_playbook_mock.return_value = 0
 
         cli_input = build_input(
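
Note: the new roles/openshift_aws/tasks/elb_single.yml and roles/openshift_aws/tasks/uninstall_iam_cert.yml tasks above share one idiom: register the module result, retry with "until: result | succeeded", set ignore_errors so the play survives exhausted retries, then follow with an explicit fail task that prints a targeted message. A minimal sketch of that idiom follows; the task name, command, and variable below are placeholders, not part of openshift-ansible, and stand in for the real ec2_elb_lb / iam_cert23 calls.

---
# Sketch of the retry-then-fail pattern used in elb_single.yml and
# uninstall_iam_cert.yml. The command and variable names are placeholders.
- name: attempt an operation that may need several tries
  command: /usr/local/bin/provision-something
  register: provision_result
  retries: 20
  delay: 5
  until: provision_result | succeeded
  ignore_errors: yes

- name: report a clear error if every retry failed
  fail:
    msg: "provisioning did not succeed after 20 attempts"
  when: not provision_result | succeeded

ignore_errors is what lets the second task run at all: without it the play would stop as soon as the retries were exhausted, and the custom failure message would never be shown.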
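
Note: in roles/openshift_aws/tasks/uninstall_elb.yml the list of ELBs to delete is derived from the same dictionary the install path uses, via "openshift_aws_elb_dict | json_query('*.*.name') | sum(start=[])". The JMESPath wildcards walk two dictionary levels and collect every name, returning one list per top-level key, and sum(start=[]) flattens those lists into the single list with_items loops over. The dictionary below is hypothetical, shaped only roughly like the role's default, and is shown purely to illustrate what the lookup produces.

# Hypothetical example data; names are placeholders.
openshift_aws_elb_dict:
  master:
    external:
      name: openshift-master-external
    internal:
      name: openshift-master-internal
  infra:
    external:
      name: openshift-infra-external

# json_query('*.*.name') -> [['openshift-master-external', 'openshift-master-internal'],
#                            ['openshift-infra-external']]
# sum(start=[])           -> ['openshift-master-external', 'openshift-master-internal',
#                             'openshift-infra-external']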
