136 files changed, 952 insertions, 364 deletions
diff --git a/.papr.inventory b/.papr.inventory
index c678e76aa..80ad81efa 100644
--- a/.papr.inventory
+++ b/.papr.inventory
@@ -22,6 +22,6 @@ ocp-master
 ocp-master
 
 [nodes]
-ocp-master openshift_schedulable=false
+ocp-master openshift_schedulable=true
 ocp-node1  openshift_node_labels="{'region':'infra'}"
 ocp-node2  openshift_node_labels="{'region':'infra'}"
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index c5214c999..120ce408f 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.9.0-0.31.0 ./
+3.9.0-0.38.0 ./
diff --git a/CONTRIBUTING.md b/CONTRIBUTING.md
index 1c0fa73ad..ef0a302dc 100644
--- a/CONTRIBUTING.md
+++ b/CONTRIBUTING.md
@@ -74,6 +74,27 @@ If you are new to Git, these links might help:
 
 ---
 
+## Simple all-in-one localhost installation
+```
+git clone https://github.com/openshift/openshift-ansible
+cd openshift-ansible
+sudo ansible-playbook -i inventory/hosts.localhost playbooks/prerequisites.yml
+sudo ansible-playbook -i inventory/hosts.localhost playbooks/deploy_cluster.yml
+```
+
+## Development process
+Most changes can be applied by re-running the config playbook. However, while
+the config playbook will run faster the second time through it's still going to
+take a very long time. As such, you may wish to run a smaller subsection of the
+installation playbooks. You can for instance run the node, master, or hosted
+playbooks in playbooks/openshift-node/config.yml,
+playbooks/openshift-master/config.yml, playbooks/openshift-hosted/config.yml
+respectively.
+
+We're actively working to refactor the playbooks into smaller discrete
+components and we'll be documenting that structure shortly, for now those are
+the most sensible logical units of work.
+
 ## Running tests and other verification tasks
 
 We use [`tox`](http://readthedocs.org/docs/tox/) to manage virtualenvs where
diff --git a/README.md b/README.md
--- a/README.md
+++ b/README.md
@@ -74,7 +74,27 @@ Fedora:
 dnf install -y ansible pyOpenSSL python-cryptography python-lxml
 ```
 
-## OpenShift Installation Documentation:
+Additional requirements:
+
+Logging:
+
+- java-1.8.0-openjdk-headless
+
+Metrics:
+
+- httpd-tools
+
+## Simple all-in-one localhost Installation
+This assumes that you've installed the base dependencies and you're running on
+Fedora or RHEL
+```
+git clone https://github.com/openshift/openshift-ansible
+cd openshift-ansible
+sudo ansible-playbook -i inventory/hosts.localhost playbooks/prerequisites.yml
+sudo ansible-playbook -i inventory/hosts.localhost playbooks/deploy_cluster.yml
+```
+
+## Complete Production Installation Documentation:
 - [OpenShift Enterprise](https://docs.openshift.com/enterprise/latest/install_config/install/advanced_install.html)
 - [OpenShift Origin](https://docs.openshift.org/latest/install_config/install/advanced_install.html)
diff --git a/images/installer/Dockerfile b/images/installer/Dockerfile
index 22a0d06a0..c9ec8ba41 100644
--- a/images/installer/Dockerfile
+++ b/images/installer/Dockerfile
@@ -10,7 +10,7 @@ COPY images/installer/origin-extra-root /
 
 # install ansible and deps
 RUN INSTALL_PKGS="python-lxml python-dns pyOpenSSL python2-cryptography openssl java-1.8.0-openjdk-headless python2-passlib httpd-tools openssh-clients origin-clients" \
  && yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS \
- && EPEL_PKGS="ansible python2-boto python2-boto3 google-cloud-sdk-183.0.0 which" \
+ && EPEL_PKGS="ansible python2-boto python2-boto3 python2-crypto google-cloud-sdk-183.0.0 which" \
  && yum install -y epel-release \
  && yum install -y --setopt=tsflags=nodocs $EPEL_PKGS \
  && EPEL_TESTING_PKGS="python2-libcloud" \
diff --git a/images/installer/Dockerfile.rhel7 b/images/installer/Dockerfile.rhel7
index 3b05c1aa6..5da950744 100644
--- a/images/installer/Dockerfile.rhel7
+++ b/images/installer/Dockerfile.rhel7
@@ -5,7 +5,7 @@ MAINTAINER OpenShift Team <dev@lists.openshift.redhat.com>
 USER root
 
 # Playbooks, roles, and their dependencies are installed from packages.
-RUN INSTALL_PKGS="atomic-openshift-utils atomic-openshift-clients python-boto python2-boto3 openssl java-1.8.0-openjdk-headless httpd-tools google-cloud-sdk" \
+RUN INSTALL_PKGS="atomic-openshift-utils atomic-openshift-clients python-boto python2-boto3 python2-crypto openssl java-1.8.0-openjdk-headless httpd-tools google-cloud-sdk" \
  && yum repolist > /dev/null \
  && yum-config-manager --enable rhel-7-server-ose-3.7-rpms \
  && yum-config-manager --enable rhel-7-server-rh-common-rpms \
diff --git a/inventory/hosts.example b/inventory/hosts.example
index f9f331880..82c588100 100644
--- a/inventory/hosts.example
+++ b/inventory/hosts.example
@@ -325,7 +325,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
 # or to one or all of the masters defined in the inventory if no load
 # balancer is present.
 #openshift_master_cluster_hostname=openshift-ansible.test.example.com
-#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
+
+# If an external load balancer is used public hostname should resolve to
+# external load balancer address
+#openshift_master_cluster_public_hostname=openshift-ansible.public.example.com
 
 # Configure controller arguments
 #osm_controller_args={'resource-quota-sync-period': ['10s']}
@@ -1114,10 +1117,9 @@ ose3-etcd[1:3]-ansible.test.example.com
 ose3-lb-ansible.test.example.com containerized=false
 
 # NOTE: Currently we require that masters be part of the SDN which requires that they also be nodes
-# However, in order to ensure that your masters are not burdened with running pods you should
-# make them unschedulable by adding openshift_schedulable=False any node that's also a master.
 [nodes]
-ose3-master[1:3]-ansible.test.example.com
+# masters should be schedulable to run web console pods
+ose3-master[1:3]-ansible.test.example.com openshift_schedulable=True
 ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
 
 [nfs]
diff --git a/inventory/hosts.glusterfs.external.example b/inventory/hosts.glusterfs.external.example
index bf2557cf0..e718e3280 100644
--- a/inventory/hosts.glusterfs.external.example
+++ b/inventory/hosts.glusterfs.external.example
@@ -35,7 +35,8 @@ openshift_storage_glusterfs_heketi_url=172.0.0.1
 master
 
 [nodes]
-master  openshift_schedulable=False
+# masters should be schedulable to run web console pods
+master  openshift_schedulable=True
 node0   openshift_schedulable=True
 node1   openshift_schedulable=True
 node2   openshift_schedulable=True
diff --git a/inventory/hosts.glusterfs.mixed.example b/inventory/hosts.glusterfs.mixed.example
index 8a20a037e..b2fc00c58 100644
--- a/inventory/hosts.glusterfs.mixed.example
+++ b/inventory/hosts.glusterfs.mixed.example
@@ -38,7 +38,8 @@ openshift_storage_glusterfs_heketi_ssh_keyfile=/root/id_rsa
 master
 
 [nodes]
-master  openshift_schedulable=False
+# masters should be schedulable to run web console pods
+master  openshift_schedulable=True
 node0   openshift_schedulable=True
 node1   openshift_schedulable=True
 node2   openshift_schedulable=True
diff --git a/inventory/hosts.glusterfs.native.example b/inventory/hosts.glusterfs.native.example
index 59acf1194..e5f2453ff 100644
--- a/inventory/hosts.glusterfs.native.example
+++ b/inventory/hosts.glusterfs.native.example
@@ -28,7 +28,8 @@ openshift_deployment_type=origin
 master
 
 [nodes]
-master  openshift_schedulable=False
+# masters should be schedulable to run web console pods
+master  openshift_schedulable=True
 # A hosted registry, by default, will only be deployed on nodes labeled
 # "region=infra".
 node0   openshift_schedulable=True
diff --git a/inventory/hosts.glusterfs.registry-only.example b/inventory/hosts.glusterfs.registry-only.example
index 6f33e9f6d..dadb2c93e 100644
--- a/inventory/hosts.glusterfs.registry-only.example
+++ b/inventory/hosts.glusterfs.registry-only.example
@@ -34,7 +34,8 @@ openshift_hosted_registry_storage_kind=glusterfs
 master
 
 [nodes]
-master  openshift_schedulable=False
+# masters should be schedulable to run web console pods
+master  openshift_schedulable=True
 # A hosted registry, by default, will only be deployed on nodes labeled
 # "region=infra".
 node0   openshift_node_labels="{'region': 'infra'}" openshift_schedulable=True
diff --git a/inventory/hosts.glusterfs.storage-and-registry.example b/inventory/hosts.glusterfs.storage-and-registry.example
index 1f3a4282a..184cb600b 100644
--- a/inventory/hosts.glusterfs.storage-and-registry.example
+++ b/inventory/hosts.glusterfs.storage-and-registry.example
@@ -35,7 +35,8 @@ openshift_hosted_registry_storage_kind=glusterfs
 master
 
 [nodes]
-master  openshift_schedulable=False
+# masters should be schedulable to run web console pods
+master  openshift_schedulable=True
 # It is recommended to not use a single cluster for both general and registry
 # storage, so two three-node clusters will be required.
 node0   openshift_schedulable=True
diff --git a/inventory/hosts.localhost b/inventory/hosts.localhost
new file mode 100644
index 000000000..41ed309e1
--- /dev/null
+++ b/inventory/hosts.localhost
@@ -0,0 +1,26 @@
+#bare minimum hostfile
+
+[OSEv3:children]
+masters
+nodes
+etcd
+
+[OSEv3:vars]
+# if your target hosts are Fedora uncomment this
+#ansible_python_interpreter=/usr/bin/python3
+openshift_deployment_type=origin
+openshift_release=3.7
+osm_cluster_network_cidr=10.128.0.0/14
+openshift_portal_net=172.30.0.0/16
+osm_host_subnet_length=9
+# localhost likely doesn't meet the minimum requirements
+openshift_disable_check=disk_availability,memory_availability
+
+[masters]
+localhost ansible_connection=local
+
+[etcd]
+localhost ansible_connection=local
+
+[nodes]
+localhost  ansible_connection=local openshift_schedulable=true openshift_node_labels="{'region': 'infra', 'zone': 'default'}"
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index f01c923d2..d14eb56cb 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -10,7 +10,7 @@
 Name:           openshift-ansible
 Version:        3.9.0
-Release:        0.31.0%{?dist}
+Release:        0.38.0%{?dist}
 Summary:        Openshift and Atomic Enterprise Ansible
 License:        ASL 2.0
 URL:            https://github.com/openshift/openshift-ansible
@@ -28,6 +28,7 @@ Requires:      java-1.8.0-openjdk-headless
 Requires:      httpd-tools
 Requires:      libselinux-python
 Requires:      python-passlib
+Requires:      python2-crypto
 
 %description
 Openshift and Atomic Enterprise Ansible
@@ -200,6 +201,105 @@ Atomic OpenShift Utilities includes
 
 %changelog
+* Mon Feb 05 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.38.0
+- Moving upgrade sg playbook to 3.9 (kwoodson@redhat.com)
+- remove openshift_upgrade_{pre,post}_storage_migration_enabled from
+  failed_when (nakayamakenjiro@gmail.com)
+- Fix version handling in 3.8/3.9 control plane upgrades (rteague@redhat.com)
+- add S3 bucket cleanup (jdiaz@redhat.com)
+- dynamic inventory bug when group exists but its empty (m.judeikis@gmail.com)
+- dynamic inventory bug when group exists but its empty (m.judeikis@gmail.com)
+- Parameterize user and disable_root options in cloud config
+  (nelluri@redhat.com)
+- Fix softlinks broken by d3fefc32a727fe3c13159c4e9fe4399f35b487a8
+  (Klaas-@users.noreply.github.com)
+
+* Fri Feb 02 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.37.0
+- Don't use 'omit' for package module (vrutkovs@redhat.com)
+- Adding requirements for logging and metrics (ewolinet@redhat.com)
+- Disable master controllers before upgrade and re-enable those when restart
+  mode is system (vrutkovs@redhat.com)
+- upgrade: run upgrade_control_plane and upgrade_nodes playbooks during full
+  upgrade (vrutkovs@redhat.com)
+
+* Fri Feb 02 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.36.0
+- Add missing tasks file (sdodson@redhat.com)
+- Upgrade to migrate to using push to DNS for registries. (kwoodson@redhat.com)
+- Adding defaults for the gcp variables to fix an undefined ansible exception.
+  (kwoodson@redhat.com)
+- Fix vsphere sanitization (sdodson@redhat.com)
+- Set a default for required vsphere variable (sdodson@redhat.com)
+- Add python2-crypto package (ccoleman@redhat.com)
+- hosts.example: clarify usage of openshift_master_cluster_public_hostname
+  (vrutkovs@redhat.com)
+- Conditionally create pvcs for metrics depending on whether or not it already
+  exists (ewolinet@redhat.com)
+- Update hosts examples with a note about scheduling on masters
+  (vrutkovs@redhat.com)
+- Fixing file write issue. (kwoodson@redhat.com)
+- Only perform console configmap ops when >= 3.9 (sdodson@redhat.com)
+- Remove playbooks/adhoc/openshift_hosted_logging_efk.yaml (sdodson@redhat.com)
+- upgrades: use openshift_version as a regexp when checking
+  openshift.common.version (vrutkovs@redhat.com)
+- Don't update master-config.yaml with logging/metrics urls >= 3.9
+  (sdodson@redhat.com)
+- Make master schedulable (vrutkovs@redhat.com)
+- Re-add openshift_aws_elb_cert_arn. (abutcher@redhat.com)
+- Ignore openshift_pkg_version during 3.8 upgrade (rteague@redhat.com)
+- bug 1537857. Fix retrieving prometheus metrics (jcantril@redhat.com)
+- Remove master_ha bool checks (mgugino@redhat.com)
+- Don't restart docker when re-deploying node certificates (sdodson@redhat.com)
+- vsphere storage default add (davis.phillips@gmail.com)
+
+* Wed Jan 31 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.35.0
+- add glusterblock support for ansible (m.judeikis@gmail.com)
+- Add a bare minimum localhost hosts file (sdodson@redhat.com)
+- copy etcd client certificates for nuage openshift monitor
+  (siva_teja.areti@nokia.com)
+- fix hostvars parameter name (tzumainn@redhat.com)
+- remove mountpoint parameter (tzumainn@redhat.com)
+- flake cleanup (tzumainn@redhat.com)
+- code simplification and lint cleanup (tzumainn@redhat.com)
+- Symlink kubectl to oc instead of openshift (mfojtik@redhat.com)
+- Rework provisioners vars to support different prefix/version for Origin/OSE
+  (vrutkovs@redhat.com)
+- add cinder mountpoint to inventory (tzumainn@redhat.com)
+- allow setting of kibana env vars (jcantril@redhat.com)
+- No longer compare with legacy hosted var (ewolinet@redhat.com)
+- Preserving ES dc storage type unless overridden by inventory variable
+  (ewolinet@redhat.com)
+- Fix: e2e tests failing due to :1936/metrics unaccessible.
+  (jmencak@redhat.com)
+
+* Tue Jan 30 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.34.0
+- docker_creds: decode docker_config for py3 only if its a string
+  (vrutkovs@redhat.com)
+- Removing ability to change default cassandra_pvc_prefix based on metrics
+  volume name (ewolinet@redhat.com)
+- Don't deploy the console if disabled or registry subtype (sdodson@redhat.com)
+- [1538960] Correct ability to overried openshift_management_app_template
+  (rteague@redhat.com)
+
+* Tue Jan 30 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.33.0
+-
+
+* Tue Jan 30 2018 Justin Pierce <jupierce@redhat.com> 3.9.0-0.32.0
+- Revert "Revert "use non-deprecated REGISTRY_OPENSHIFT_SERVER_ADDR variable to
+  set the registry hostname"" (bparees@users.noreply.github.com)
+- Rebase Prometheus example for new scrape endpoints and expose alert manager
+  (m.judeikis@gmail.com)
+- Revert "use non-deprecated REGISTRY_OPENSHIFT_SERVER_ADDR variable to set the
+  registry hostname" (bparees@users.noreply.github.com)
+- Bug 1539182: Detect if ClusterResourceOverrides enabled during console
+  install (spadgett@redhat.com)
+- Fix container_runtime variable typo (mgugino@redhat.com)
+- Correct 3.7 to 3.9 upgrade openshift_image_tag (mgugino@redhat.com)
+- Fix misaligned ports for sg,elb,api (mazzystr@gmail.com)
+- Add GPG keys in the base image and don't install docker (ccoleman@redhat.com)
+- Change catalog roles install to use aggregation (jpeeler@redhat.com)
+- Make IP object a string (fabian@fabianism.us)
+- Add kube service ipaddress to no_proxy list (sdodson@redhat.com)
+
 * Sat Jan 27 2018 Jenkins CD Merge Bot <smunilla@redhat.com> 3.9.0-0.31.0
 - removed references to 'files' dir in spec file (dyocum@redhat.com)
 - files in ansible roles do not need to have the path specified to them when
diff --git a/playbooks/adhoc/openshift_hosted_logging_efk.yaml b/playbooks/adhoc/openshift_hosted_logging_efk.yaml
deleted file mode 100644
index faeb332ad..000000000
--- a/playbooks/adhoc/openshift_hosted_logging_efk.yaml
+++ /dev/null
@@ -1,16 +0,0 @@
----
-- hosts: masters[0]
-  roles:
-  - role: openshift_logging
-    openshift_hosted_logging_cleanup: no
-
-- name: Update master-config for publicLoggingURL
-  hosts: masters:!masters[0]
-  pre_tasks:
-  - set_fact:
-      openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ openshift_master_default_subdomain }}"
-  tasks:
-  - import_role:
-      name: openshift_logging
-      tasks_from: update_master_config
-    when: openshift_hosted_logging_deploy | default(false) | bool
diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md
index bdc98d1e0..cf811ca84 100644
--- a/playbooks/aws/README.md
+++ b/playbooks/aws/README.md
@@ -201,9 +201,7 @@ There are more enhancements that are arriving for provisioning.  These will incl
 
 ## Uninstall / Deprovisioning
 
-At this time, only deprovisioning of the output of the prerequisites step is provided. You can/must manually remove things like ELBs and scale groups before attempting to undo the work by the preprovisiong step.
-
-To undo the work done by the prerequisites playbook, simply call the uninstall_prerequisites.yml playbook. You should use the same inventory file and provisioning_vars.yml file that was used during provisioning.
+To undo the work done by the prerequisites playbook, simply call the uninstall_prerequisites.yml playbook. You will have needed to remove any of the other objects (ie ELBs, instances, etc) before attempting. You should use the same inventory file and provisioning_vars.yml file that was used during provisioning.
 
 ```
 ansible-playbook -i <previous inventory file> -e @<previous provisioning_vars file> uninstall_prerequisites.yml
@@ -211,4 +209,10 @@ ansible-playbook -i <previous inventory file> -e @<previous provisioning_vars fi
 
 This should result in removal of the security groups and VPC that were created.
 
+Cleaning up the S3 bucket contents can be accomplished with:
+
+```
+ansible-playbook -i <previous inventory file> -e @<previous provisioning_vars file> uninstall_s3.yml
+```
+
 NOTE: If you want to also remove the ssh keys that were uploaded (**these ssh keys would be shared if you are running multiple clusters in the same AWS account** so we don't remove these by default) then you should add 'openshift_aws_enable_uninstall_shared_objects: True' to your provisioning_vars.yml file.
diff --git a/playbooks/aws/openshift-cluster/uninstall_s3.yml b/playbooks/aws/openshift-cluster/uninstall_s3.yml
new file mode 100644
index 000000000..448b47aee
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/uninstall_s3.yml
@@ -0,0 +1,10 @@
+---
+- name: Empty/delete s3 bucket
+  hosts: localhost
+  connection: local
+  tasks:
+  - name: empty/delete s3 bucket
+    include_role:
+      name: openshift_aws
+      tasks_from: uninstall_s3.yml
+    when: openshift_aws_create_s3 | default(true) | bool
diff --git a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_scale_groups.yml
index 23a3fcbb5..23a3fcbb5 100644
--- a/playbooks/byo/openshift-cluster/upgrades/v3_7/upgrade_scale_groups.yml
+++ b/playbooks/byo/openshift-cluster/upgrades/v3_9/upgrade_scale_groups.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index de612da21..f44ab3580 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -113,6 +113,22 @@
     registry_url: "{{ openshift.master.registry_url }}"
     openshift_hosted_templates_import_command: replace
 
+  post_tasks:
+  # we need to migrate customers to the new pattern of pushing to the registry via dns
+  # Step 1: verify the certificates have the docker registry service name
+  - shell: >
+      echo -n | openssl s_client -showcerts -servername docker-registry.default.svc -connect docker-registry.default.svc:5000  | openssl x509 -text |  grep -A1 'X509v3 Subject Alternative Name:' | grep -Pq 'DNS:docker-registry\.default\.svc(,|$)'
+    register: cert_output
+
+  # Step 2: Set a fact to be used to determine if we should run the redeploy of registry certs
+  - name: set a fact to include the registry certs playbook if needed
+    set_fact:
+      openshift_hosted_rollout_certs_and_registry: "{{ cert_output.rc == 0  }}"
+
+# Run the redeploy certs based upon the certificates
+- when: hostvars[groups.oo_first_master.0].openshift_hosted_rollout_certs_and_registry
+  import_playbook: ../../../openshift-hosted/redeploy-registry-certificates.yml
+
 # Check for warnings to be printed at the end of the upgrade:
 - name: Clean up and display warnings
   hosts: oo_masters_to_config
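The openssl pipeline in the post_tasks above asserts that the registry's serving certificate lists `docker-registry.default.svc` among its DNS Subject Alternative Names. A rough standalone Python 3 sketch of the same check, for illustration only (it is not part of the playbook, and it assumes the `cryptography` package is available):

```python
import socket
import ssl

from cryptography import x509
from cryptography.hazmat.backends import default_backend


def registry_cert_has_dns_san(host='docker-registry.default.svc', port=5000):
    """Fetch the serving cert and report whether `host` appears among its DNS SANs."""
    ctx = ssl.SSLContext(ssl.PROTOCOL_TLS)
    ctx.check_hostname = False
    ctx.verify_mode = ssl.CERT_NONE  # we only inspect the cert; trust is not the question here
    with socket.create_connection((host, port), timeout=10) as sock:
        with ctx.wrap_socket(sock, server_hostname=host) as tls:
            der = tls.getpeercert(binary_form=True)
    cert = x509.load_der_x509_certificate(der, default_backend())
    sans = cert.extensions.get_extension_for_class(x509.SubjectAlternativeName).value
    return host in sans.get_values_for_type(x509.DNSName)
```

When the name is missing, the playbook's `cert_output.rc` is non-zero and the registry certificate redeploy is triggered.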
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/config.yml b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
index 2b27f8dd0..44af37b2d 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/config.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/config.yml
@@ -5,8 +5,6 @@
 # Pre-upgrade
 - import_playbook: ../initialize_nodes_to_upgrade.yml
 
-- import_playbook: verify_cluster.yml
-
 - name: Update repos on upgrade hosts
   hosts: "{{ l_upgrade_repo_hosts }}"
   roles:
@@ -53,6 +51,8 @@
     # l_openshift_version_set_hosts is passed via upgrade_control_plane.yml
     # l_openshift_version_check_hosts is passed via upgrade_control_plane.yml
 
+- import_playbook: verify_cluster.yml
+
 # If we're only upgrading nodes, we need to ensure masters are already upgraded
 - name: Verify masters are already upgraded
   hosts: oo_masters_to_config
@@ -60,7 +60,7 @@
   - fail: msg="Master running {{ openshift.common.version }} must be upgraded to {{ openshift_version }} before node upgrade can be run."
     when:
     - l_upgrade_nodes_only | default(False) | bool
-    - openshift.common.version != openshift_version
+    - not openshift.common.version | match(openshift_version)
 
 # If we're only upgrading nodes, skip this.
 - import_playbook: ../../../../openshift-master/private/validate_restart.yml
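The switch from `!=` to `match` above matters because Ansible's `match` test anchors a regular expression at the start of the string, so a requested `openshift_version` of `3.9` accepts an installed `3.9.0` instead of failing a strict string comparison. A hypothetical illustration of the same comparison in plain Python:

```python
import re

def version_satisfied(installed, requested):
    """Mimic `openshift.common.version | match(openshift_version)`:
    re.match anchors at the start, so '3.9' matches '3.9.0'."""
    return re.match(requested, installed) is not None

assert version_satisfied('3.9.0', '3.9')        # prefix match: node upgrade may proceed
assert not version_satisfied('3.7.2', '3.9')    # different minor: still fails as before
```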
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
index 5ee8a9d78..463a05688 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_cluster.yml
@@ -17,6 +17,7 @@
         valid version for a {{ openshift_upgrade_target }} upgrade
     when:
     - openshift_pkg_version is defined
+    - openshift_pkg_version != ""
    - openshift_pkg_version.split('-',1).1 is version_compare(openshift_upgrade_target ,'<')
 
   - fail:
@@ -25,6 +26,7 @@
         valid version for a {{ openshift_upgrade_target }} upgrade
     when:
     - openshift_image_tag is defined
+    - openshift_image_tag != ""
     - openshift_image_tag.split('v',1).1 is version_compare(openshift_upgrade_target ,'<')
 
   - set_fact:
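The added `!= ""` guards exist because an empty `openshift_pkg_version` or `openshift_image_tag` (which the 3.8/3.9 double upgrade now sets explicitly, as seen further down) would make the `split(...).1` lookup blow up before `version_compare` ever ran. A hypothetical sketch of that failure mode:

```python
def pkg_version_component(openshift_pkg_version):
    """Mirror `openshift_pkg_version.split('-',1).1` from the check above."""
    return openshift_pkg_version.split('-', 1)[1]

print(pkg_version_component('-3.9.0'))  # '3.9.0' -- the value version_compare inspects
try:
    pkg_version_component('')           # defined but empty: split yields a single element
except IndexError:
    print('IndexError -- hence the explicit != "" guard')
```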
"{{ openshift_service_type }}-master-controllers" -      state: started - -- import_playbook: ../upgrade_nodes.yml - -- import_playbook: ../post_control_plane.yml +- import_playbook: upgrade_nodes.yml diff --git a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml index fe1fdefff..8792295c6 100644 --- a/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml +++ b/playbooks/common/openshift-cluster/upgrades/v3_9/upgrade_control_plane.yml @@ -26,6 +26,7 @@        openshift_upgrade_min: '3.7'        openshift_release: '3.8'        _requested_pkg_version: "{{ openshift_pkg_version if openshift_pkg_version is defined else omit }}" +      openshift_pkg_version: ''        _requested_image_tag: "{{ openshift_image_tag if openshift_image_tag is defined else omit }}"        l_double_upgrade_cp: True      when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<') @@ -61,9 +62,8 @@  # Pre-upgrade completed -- import_playbook: ../upgrade_control_plane.yml -  vars: -    openshift_release: '3.8' +- name: Intermediate 3.8 Upgrade +  import_playbook: ../upgrade_control_plane.yml    when: hostvars[groups.oo_first_master.0].openshift_currently_installed_version | version_compare('3.8','<')  ## 3.8 upgrade complete we should now be able to upgrade to 3.9 @@ -76,7 +76,7 @@        openshift_upgrade_target: '3.9'        openshift_upgrade_min: '3.8'        openshift_release: '3.9' -      openshift_pkg_version: "{{ _requested_pkg_version | default ('-3.9*') }}" +      openshift_pkg_version: "{{ _requested_pkg_version if _requested_pkg_version is defined else '' }}"    # Set the user's specified image_tag for 3.9 upgrade if it was provided.    
   - set_fact:
       openshift_image_tag: "{{ _requested_image_tag }}"
@@ -105,6 +105,7 @@
     l_upgrade_docker_target_hosts: "oo_masters_to_config:oo_etcd_to_config"
     l_upgrade_excluder_hosts: "oo_masters_to_config"
     openshift_protect_installed_version: False
+    openshift_version_reinit: True
 
 - name: Flag pre-upgrade checks complete for hosts without errors
   hosts: oo_masters_to_config:oo_etcd_to_config
@@ -113,8 +114,6 @@
       pre_upgrade_complete: True
 
 - import_playbook: ../upgrade_control_plane.yml
-  vars:
-    openshift_release: '3.9'
 
 # All controllers must be stopped at the same time then restarted
 - name: Cycle all controller services to force new leader election mode
@@ -123,14 +122,16 @@
   roles:
   - role: openshift_facts
   tasks:
-  - name: Stop {{ openshift_service_type }}-master-controllers
-    systemd:
+  - name: Restart master controllers to force new leader election mode
+    service:
       name: "{{ openshift_service_type }}-master-controllers"
-      state: stopped
-  - name: Start {{ openshift_service_type }}-master-controllers
-    systemd:
+      state: restart
+    when: openshift.common.rolling_restart_mode == 'service'
+  - name: Re-enable master controllers to force new leader election mode
+    service:
       name: "{{ openshift_service_type }}-master-controllers"
-      state: started
+      enabled: true
+    when: openshift.common.rolling_restart_mode == 'system'
 
 - import_playbook: ../post_control_plane.yml
diff --git a/playbooks/init/base_packages.yml b/playbooks/init/base_packages.yml
index 0a730a88a..81f4dd183 100644
--- a/playbooks/init/base_packages.yml
+++ b/playbooks/init/base_packages.yml
@@ -16,8 +16,9 @@
       - iproute
      - "{{ 'python3-dbus' if ansible_distribution == 'Fedora' else 'dbus-python' }}"
       - "{{ 'python3-PyYAML' if ansible_distribution == 'Fedora' else 'PyYAML' }}"
-      - "{{ 'python-ipaddress' if ansible_distribution != 'Fedora' else omit }}"
+      - "{{ 'python-ipaddress' if ansible_distribution != 'Fedora' else '' }}"
       - yum-utils
+      when: item != ''
       register: result
       until: result is succeeded
diff --git a/playbooks/init/basic_facts.yml b/playbooks/init/basic_facts.yml
index 06a4e7291..a9bf06693 100644
--- a/playbooks/init/basic_facts.yml
+++ b/playbooks/init/basic_facts.yml
@@ -67,3 +67,11 @@
       first_master_client_binary: "{{  openshift_client_binary }}"
       #Some roles may require this to be set for first master
       openshift_client_binary: "{{ openshift_client_binary }}"
+
+- name: Disable web console if required
+  hosts: oo_masters_to_config
+  gather_facts: no
+  tasks:
+  - set_fact:
+      openshift_web_console_install: False
+    when: openshift_deployment_subtype == 'registry' or ( osm_disabled_features is defined and 'WebConsole' in osm_disabled_features )
diff --git a/playbooks/openshift-hosted/deploy_registry.yml b/playbooks/openshift-hosted/deploy_registry.yml
new file mode 100644
index 000000000..2453329dd
--- /dev/null
+++ b/playbooks/openshift-hosted/deploy_registry.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/openshift_hosted_registry.yml
diff --git a/playbooks/openshift-hosted/deploy_router.yml b/playbooks/openshift-hosted/deploy_router.yml
new file mode 100644
index 000000000..e832eeeea
--- /dev/null
+++ b/playbooks/openshift-hosted/deploy_router.yml
@@ -0,0 +1,4 @@
+---
+- import_playbook: ../init/main.yml
+
+- import_playbook: private/openshift_hosted_router.yml
diff --git a/playbooks/openshift-hosted/private/openshift_default_storage_class.yml b/playbooks/openshift-hosted/private/openshift_default_storage_class.yml
index 62fe0dd60..c59ebcead 100644
--- a/playbooks/openshift-hosted/private/openshift_default_storage_class.yml
+++ b/playbooks/openshift-hosted/private/openshift_default_storage_class.yml
@@ -3,4 +3,6 @@
   hosts: oo_first_master
   roles:
   - role: openshift_default_storage_class
-    when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce' or openshift_cloudprovider_kind == 'openstack')
+    when:
+    - openshift_cloudprovider_kind is defined
+    - openshift_cloudprovider_kind in ['aws','gce','openstack','vsphere']
diff --git a/playbooks/openshift-logging/private/config.yml b/playbooks/openshift-logging/private/config.yml
index d6b26647c..07aa8bfde 100644
--- a/playbooks/openshift-logging/private/config.yml
+++ b/playbooks/openshift-logging/private/config.yml
@@ -24,6 +24,7 @@
     - import_role:
         name: openshift_logging
         tasks_from: update_master_config
+      when: not openshift.common.version_gte_3_9
 
 - name: Logging Install Checkpoint End
   hosts: all
diff --git a/playbooks/openshift-master/private/additional_config.yml b/playbooks/openshift-master/private/additional_config.yml
index 85be0e600..ca514ed26 100644
--- a/playbooks/openshift-master/private/additional_config.yml
+++ b/playbooks/openshift-master/private/additional_config.yml
@@ -16,7 +16,6 @@
   vars:
     cockpit_plugins: "{{ osm_cockpit_plugins | default(['cockpit-kubernetes']) }}"
     etcd_urls: "{{ openshift.master.etcd_urls }}"
-    openshift_master_ha: "{{ groups.oo_masters | length > 1 }}"
     omc_cluster_hosts: "{{ groups.oo_masters | join(' ')}}"
   roles:
   - role: openshift_project_request_template
diff --git a/playbooks/openshift-master/private/config.yml b/playbooks/openshift-master/private/config.yml
index 153ea9993..d2fc2eed8 100644
--- a/playbooks/openshift-master/private/config.yml
+++ b/playbooks/openshift-master/private/config.yml
@@ -78,7 +78,6 @@
         console_url: "{{ openshift_master_console_url | default(None) }}"
         console_use_ssl: "{{ openshift_master_console_use_ssl | default(None) }}"
         public_console_url: "{{ openshift_master_public_console_url | default(None) }}"
-        ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
         master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
 
 - name: Inspect state of first master config settings
@@ -166,7 +165,6 @@
   hosts: oo_masters_to_config
   any_errors_fatal: true
   vars:
-    openshift_master_ha: "{{ openshift.master.ha }}"
     openshift_master_count: "{{ openshift.master.master_count }}"
     openshift_master_session_auth_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_auth_secrets }}"
     openshift_master_session_encryption_secrets: "{{ hostvars[groups.oo_first_master.0].openshift.master.session_encryption_secrets }}"
@@ -186,6 +184,7 @@
   - role: openshift_buildoverrides
   - role: nickhammond.logrotate
   - role: openshift_master
+    openshift_master_ha: "{{ (groups.oo_masters | length > 1) | bool }}"
     openshift_master_hosts: "{{ groups.oo_masters_to_config }}"
     r_openshift_master_clean_install: "{{ hostvars[groups.oo_first_master.0].l_clean_install }}"
     r_openshift_master_etcd3_storage: "{{ hostvars[groups.oo_first_master.0].l_etcd3_enabled }}"
diff --git a/playbooks/openshift-master/private/restart.yml b/playbooks/openshift-master/private/restart.yml
index 5cb284935..17d90533c 100644
--- a/playbooks/openshift-master/private/restart.yml
+++ b/playbooks/openshift-master/private/restart.yml
@@ -3,16 +3,13 @@
 
 - name: Restart masters
   hosts: oo_masters_to_config
-  vars:
-    openshift_master_ha: "{{ groups.oo_masters_to_config | length > 1 }}"
   serial: 1
-  handlers:
-  - import_tasks: ../../../roles/openshift_master/handlers/main.yml
   roles:
   - openshift_facts
   post_tasks:
   - include_tasks: tasks/restart_hosts.yml
     when: openshift_rolling_restart_mode | default('services') == 'system'
-
-  - include_tasks: tasks/restart_services.yml
+  - import_role:
+      name: openshift_master
+      tasks_from: restart.yml
     when: openshift_rolling_restart_mode | default('services') == 'services'
diff --git a/playbooks/openshift-master/private/scaleup.yml b/playbooks/openshift-master/private/scaleup.yml
index 007b23ea3..20ebf70d3 100644
--- a/playbooks/openshift-master/private/scaleup.yml
+++ b/playbooks/openshift-master/private/scaleup.yml
@@ -8,7 +8,6 @@
   - openshift_facts:
       role: master
      local_facts:
-        ha: "{{ openshift_master_ha | default(groups.oo_masters | length > 1) }}"
         master_count: "{{ openshift_master_count | default(groups.oo_masters | length) }}"
   - name: Update master count
     modify_yaml:
diff --git a/playbooks/openshift-metrics/private/config.yml b/playbooks/openshift-metrics/private/config.yml
index 1e237e3f0..889ea77b1 100644
--- a/playbooks/openshift-metrics/private/config.yml
+++ b/playbooks/openshift-metrics/private/config.yml
@@ -25,6 +25,7 @@
     import_role:
       name: openshift_metrics
       tasks_from: update_master_config.yaml
+    when: not openshift.common.version_gte_3_9
 
 - name: Metrics Install Checkpoint End
   hosts: all
diff --git a/playbooks/openshift-node/private/restart.yml b/playbooks/openshift-node/private/restart.yml
index 7249ced70..7371bd7ac 100644
--- a/playbooks/openshift-node/private/restart.yml
+++ b/playbooks/openshift-node/private/restart.yml
@@ -16,6 +16,7 @@
     until: not (l_docker_restart_docker_in_node_result is failed)
     retries: 3
     delay: 30
+    when: openshift_node_restart_docker_required | default(True)
 
   - name: Restart containerized services
     service:
diff --git a/playbooks/openshift-node/redeploy-certificates.yml b/playbooks/openshift-node/redeploy-certificates.yml
index 8b7272485..cdf816fbf 100644
--- a/playbooks/openshift-node/redeploy-certificates.yml
+++ b/playbooks/openshift-node/redeploy-certificates.yml
@@ -4,3 +4,5 @@
 - import_playbook: private/redeploy-certificates.yml
 
 - import_playbook: private/restart.yml
+  vars:
+    openshift_node_restart_docker_required: False
diff --git a/playbooks/openstack/inventory.py b/playbooks/openstack/inventory.py
index 76e658eb7..d5a8c3e24 100755
--- a/playbooks/openstack/inventory.py
+++ b/playbooks/openstack/inventory.py
@@ -15,18 +15,10 @@ import json
 
 import shade
 
-def build_inventory():
-    '''Build the dynamic inventory.'''
-    cloud = shade.openstack_cloud()
-
+def base_openshift_inventory(cluster_hosts):
+    '''Set the base openshift inventory.'''
     inventory = {}
 
-    # TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER`
-    # environment variable.
-    cluster_hosts = [
-        server for server in cloud.list_servers()
-        if 'metadata' in server and 'clusterid' in server.metadata]
-
     masters = [server.name for server in cluster_hosts
                if server.metadata['host-type'] == 'master']
 
@@ -67,6 +59,34 @@ def build_inventory():
     inventory['dns'] = {'hosts': dns}
     inventory['lb'] = {'hosts': load_balancers}
 
+    return inventory
+
+
+def get_docker_storage_mountpoints(volumes):
+    '''Check volumes to see if they're being used for docker storage'''
+    docker_storage_mountpoints = {}
+    for volume in volumes:
+        if volume.metadata.get('purpose') == "openshift_docker_storage":
+            for attachment in volume.attachments:
+                if attachment.server_id in docker_storage_mountpoints:
+                    docker_storage_mountpoints[attachment.server_id].append(attachment.device)
+                else:
+                    docker_storage_mountpoints[attachment.server_id] = [attachment.device]
+    return docker_storage_mountpoints
+
+
+def build_inventory():
+    '''Build the dynamic inventory.'''
+    cloud = shade.openstack_cloud()
+
+    # TODO(shadower): filter the servers based on the `OPENSHIFT_CLUSTER`
+    # environment variable.
+    cluster_hosts = [
+        server for server in cloud.list_servers()
+        if 'metadata' in server and 'clusterid' in server.metadata]
+
+    inventory = base_openshift_inventory(cluster_hosts)
+
     for server in cluster_hosts:
         if 'group' in server.metadata:
             group = server.metadata.group
@@ -76,6 +96,9 @@ def build_inventory():
 
     inventory['_meta'] = {'hostvars': {}}
 
+    # cinder volumes used for docker storage
+    docker_storage_mountpoints = get_docker_storage_mountpoints(cloud.list_volumes())
+
     for server in cluster_hosts:
         ssh_ip_address = server.public_v4 or server.private_v4
         hostvars = {
@@ -111,6 +134,11 @@ def build_inventory():
         if node_labels:
             hostvars['openshift_node_labels'] = node_labels
 
+        # check for attached docker storage volumes
+        if 'os-extended-volumes:volumes_attached' in server:
+            if server.id in docker_storage_mountpoints:
+                hostvars['docker_storage_mountpoints'] = ' '.join(docker_storage_mountpoints[server.id])
+
         inventory['_meta']['hostvars'][server.name] = hostvars
 
     return inventory
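The grouping performed by the new `get_docker_storage_mountpoints()` above is easy to check in isolation. A minimal sketch with fabricated dict-based sample data (shade actually returns attribute-style objects, so the real code uses `volume.metadata` rather than `volume['metadata']`); `setdefault` collapses the if/else branch but keeps the same behavior:

```python
def group_mountpoints(volumes):
    """Same grouping logic as get_docker_storage_mountpoints(), over plain dicts."""
    mounts = {}
    for volume in volumes:
        if volume['metadata'].get('purpose') == 'openshift_docker_storage':
            for attachment in volume['attachments']:
                mounts.setdefault(attachment['server_id'], []).append(attachment['device'])
    return mounts

volumes = [
    {'metadata': {'purpose': 'openshift_docker_storage'},
     'attachments': [{'server_id': 'app-node-0', 'device': '/dev/vdb'},
                     {'server_id': 'app-node-1', 'device': '/dev/vdb'}]},
    {'metadata': {'purpose': 'something-else'}, 'attachments': []},
]
print(group_mountpoints(volumes))
# {'app-node-0': ['/dev/vdb'], 'app-node-1': ['/dev/vdb']}
```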
diff --git a/roles/lib_openshift/library/oc_adm_ca_server_cert.py b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
index 05b2763d5..bfed58011 100644
--- a/roles/lib_openshift/library/oc_adm_ca_server_cert.py
+++ b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
@@ -1138,7 +1138,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk. This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_adm_csr.py b/roles/lib_openshift/library/oc_adm_csr.py
index 324f52689..c78e379d5 100644
--- a/roles/lib_openshift/library/oc_adm_csr.py
+++ b/roles/lib_openshift/library/oc_adm_csr.py
@@ -1116,7 +1116,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk. This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_adm_manage_node.py b/roles/lib_openshift/library/oc_adm_manage_node.py
index 152f270ab..b1b2cb5b5 100644
--- a/roles/lib_openshift/library/oc_adm_manage_node.py
+++ b/roles/lib_openshift/library/oc_adm_manage_node.py
@@ -1124,7 +1124,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk. This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_adm_policy_group.py b/roles/lib_openshift/library/oc_adm_policy_group.py
index 3082f5890..2773201d7 100644
--- a/roles/lib_openshift/library/oc_adm_policy_group.py
+++ b/roles/lib_openshift/library/oc_adm_policy_group.py
@@ -1110,7 +1110,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk. This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py
index 92515889b..25cbed8b7 100644
--- a/roles/lib_openshift/library/oc_adm_policy_user.py
+++ b/roles/lib_openshift/library/oc_adm_policy_user.py
@@ -1124,7 +1124,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk. This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_adm_registry.py b/roles/lib_openshift/library/oc_adm_registry.py
index fe565987c..e26214316 100644
--- a/roles/lib_openshift/library/oc_adm_registry.py
+++ b/roles/lib_openshift/library/oc_adm_registry.py
@@ -1228,7 +1228,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk. This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_adm_router.py b/roles/lib_openshift/library/oc_adm_router.py
index 44de29592..62fca19e5 100644
--- a/roles/lib_openshift/library/oc_adm_router.py
+++ b/roles/lib_openshift/library/oc_adm_router.py
@@ -1253,7 +1253,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk. This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_clusterrole.py b/roles/lib_openshift/library/oc_clusterrole.py
index 9761b4b4e..0c4bfa01f 100644
--- a/roles/lib_openshift/library/oc_clusterrole.py
+++ b/roles/lib_openshift/library/oc_clusterrole.py
@@ -1102,7 +1102,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk. This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_configmap.py b/roles/lib_openshift/library/oc_configmap.py
index 047edffbb..36e6111eb 100644
--- a/roles/lib_openshift/library/oc_configmap.py
+++ b/roles/lib_openshift/library/oc_configmap.py
@@ -1108,7 +1108,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk. This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py
index 0cea07256..ab4f153c7 100644
--- a/roles/lib_openshift/library/oc_edit.py
+++ b/roles/lib_openshift/library/oc_edit.py
@@ -1152,7 +1152,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk. This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_env.py b/roles/lib_openshift/library/oc_env.py
index 1f52fba40..f334ddaa4 100644
--- a/roles/lib_openshift/library/oc_env.py
+++ b/roles/lib_openshift/library/oc_env.py
@@ -1119,7 +1119,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk. This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_group.py b/roles/lib_openshift/library/oc_group.py
index 72023eaf7..7e9078339 100644
--- a/roles/lib_openshift/library/oc_group.py
+++ b/roles/lib_openshift/library/oc_group.py
@@ -1092,7 +1092,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk. This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_image.py b/roles/lib_openshift/library/oc_image.py
index 94b08d9ce..e71e2eb5c 100644
--- a/roles/lib_openshift/library/oc_image.py
+++ b/roles/lib_openshift/library/oc_image.py
@@ -1111,7 +1111,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk. This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_label.py b/roles/lib_openshift/library/oc_label.py
index ad837fdb5..ac3279ef8 100644
--- a/roles/lib_openshift/library/oc_label.py
+++ b/roles/lib_openshift/library/oc_label.py
@@ -1128,7 +1128,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk. This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py
index 892546e56..ca53c4c97 100644
--- a/roles/lib_openshift/library/oc_obj.py
+++ b/roles/lib_openshift/library/oc_obj.py
@@ -1131,7 +1131,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk. This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_objectvalidator.py b/roles/lib_openshift/library/oc_objectvalidator.py
index 38df585f0..877c78d93 100644
--- a/roles/lib_openshift/library/oc_objectvalidator.py
+++ b/roles/lib_openshift/library/oc_objectvalidator.py
@@ -1063,7 +1063,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk. This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py
index 70632f86d..507170424 100644
--- a/roles/lib_openshift/library/oc_process.py
+++ b/roles/lib_openshift/library/oc_process.py
@@ -1120,7 +1120,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk. This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_project.py b/roles/lib_openshift/library/oc_project.py
index 4eee748d7..347e879ca 100644
--- a/roles/lib_openshift/library/oc_project.py
+++ b/roles/lib_openshift/library/oc_project.py
@@ -1117,7 +1117,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk. This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_pvc.py b/roles/lib_openshift/library/oc_pvc.py
index 2e73a7645..93c96b817 100644
--- a/roles/lib_openshift/library/oc_pvc.py
+++ b/roles/lib_openshift/library/oc_pvc.py
@@ -1124,7 +1124,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk. This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py
index e003770d8..3369cf134 100644
--- a/roles/lib_openshift/library/oc_route.py
+++ b/roles/lib_openshift/library/oc_route.py
@@ -1168,7 +1168,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk. This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_scale.py b/roles/lib_openshift/library/oc_scale.py
index c142f1f43..1b6202a26 100644
--- a/roles/lib_openshift/library/oc_scale.py
+++ b/roles/lib_openshift/library/oc_scale.py
@@ -1106,7 +1106,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk. This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py
index 62bda33ad..732299e48 100644
--- a/roles/lib_openshift/library/oc_secret.py
+++ b/roles/lib_openshift/library/oc_secret.py
@@ -1164,7 +1164,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk. This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_service.py b/roles/lib_openshift/library/oc_service.py
index c541e1bbd..a6cf764ff 100644
--- a/roles/lib_openshift/library/oc_service.py
+++ b/roles/lib_openshift/library/oc_service.py
@@ -1171,7 +1171,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk. This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py
index 646a39224..90d514292 100644
--- a/roles/lib_openshift/library/oc_serviceaccount.py
+++ b/roles/lib_openshift/library/oc_serviceaccount.py
@@ -1104,7 +1104,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk. This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_serviceaccount_secret.py b/roles/lib_openshift/library/oc_serviceaccount_secret.py
index 99a8e8f3d..0d9acac0e 100644
--- a/roles/lib_openshift/library/oc_serviceaccount_secret.py
+++ b/roles/lib_openshift/library/oc_serviceaccount_secret.py
@@ -1104,7 +1104,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk. This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_storageclass.py b/roles/lib_openshift/library/oc_storageclass.py
index 7e7d0fa60..6fb5a94e9 100644
--- a/roles/lib_openshift/library/oc_storageclass.py
+++ b/roles/lib_openshift/library/oc_storageclass.py
@@ -1122,7 +1122,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk. This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_user.py b/roles/lib_openshift/library/oc_user.py
index 7bbe38819..feb69348b 100644
--- a/roles/lib_openshift/library/oc_user.py
+++ b/roles/lib_openshift/library/oc_user.py
@@ -1164,7 +1164,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk. This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py
index 63adbd6ac..0f024c048 100644
--- a/roles/lib_openshift/library/oc_version.py
+++ b/roles/lib_openshift/library/oc_version.py
@@ -1076,7 +1076,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk. This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
diff --git a/roles/lib_openshift/library/oc_volume.py b/roles/lib_openshift/library/oc_volume.py
index 3c07f8d4b..6f409f979 100644
--- a/roles/lib_openshift/library/oc_volume.py
+++ b/roles/lib_openshift/library/oc_volume.py
@@ -1153,7 +1153,7 @@ class Utils(object):  # pragma: no cover
         ''' Actually write the file contents to disk. This helps with mocking. '''
         with open(filename, 'w') as sfd:
-            sfd.write(contents)
+            sfd.write(str(contents))
 
     @staticmethod
     def create_tmp_file_from_contents(rname, data, ftype='yaml'):
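Every one-line `str(contents)` change above is the same fix repeated: these modules are generated from the shared source in `src/lib/base.py`, whose diff follows. Per the changelog this is the "Fixing file write issue" change; the coercion presumably guards against `Utils._write` being handed a non-string (for example an already-parsed YAML structure), which `file.write()` rejects outright. A hypothetical illustration:

```python
content = {'kind': 'ConfigMap', 'metadata': {'name': 'demo'}}  # parsed YAML, not text

with open('/tmp/demo.yaml', 'w') as sfd:
    try:
        sfd.write(content)        # TypeError: write() argument must be str
    except TypeError:
        sfd.write(str(content))   # str() coercion always yields writable text
```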
'''          with open(filename, 'w') as sfd: -            sfd.write(contents) +            sfd.write(str(contents))      @staticmethod      def create_tmp_file_from_contents(rname, data, ftype='yaml'): diff --git a/roles/lib_utils/library/docker_creds.py b/roles/lib_utils/library/docker_creds.py index b94c0b779..936fb1c38 100644 --- a/roles/lib_utils/library/docker_creds.py +++ b/roles/lib_utils/library/docker_creds.py @@ -148,10 +148,12 @@ def update_config(docker_config, registry, username, password):  def write_config(module, docker_config, dest):      '''Write updated credentials into dest/config.json''' +    if not isinstance(docker_config, dict): +        docker_config = docker_config.decode()      conf_file_path = os.path.join(dest, 'config.json')      try:          with open(conf_file_path, 'w') as conf_file: -            json.dump(docker_config.decode(), conf_file, indent=8) +            json.dump(docker_config, conf_file, indent=8)      except IOError as ioerror:          result = {'failed': True,                    'changed': False, diff --git a/roles/lib_utils/library/openshift_container_binary_sync.py b/roles/lib_utils/library/openshift_container_binary_sync.py index 440b8ec28..efdfcf1c7 100644 --- a/roles/lib_utils/library/openshift_container_binary_sync.py +++ b/roles/lib_utils/library/openshift_container_binary_sync.py @@ -107,7 +107,7 @@ class BinarySyncer(object):              self._sync_binary('oc')          # Ensure correct symlinks created: -        self._sync_symlink('kubectl', 'openshift') +        self._sync_symlink('kubectl', 'oc')          # Remove old oadm binary          if os.path.exists(os.path.join(self.bin_dir, 'oadm')): diff --git a/roles/nuage_master/handlers/main.yaml b/roles/nuage_master/handlers/main.yaml index 7b55dda56..c0411d641 100644 --- a/roles/nuage_master/handlers/main.yaml +++ b/roles/nuage_master/handlers/main.yaml @@ -1,9 +1,7 @@  ---  - name: restart master api    systemd: name={{ openshift_service_type }}-master-api state=restarted -  when: > -    (openshift_master_ha | bool) and -    (not master_api_service_status_changed | default(false)) +  when: (not master_api_service_status_changed | default(false))  # TODO: need to fix up ignore_errors here  # We retry the controllers because the API may not be 100% initialized yet. 
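The `sfd.write(str(contents))` change repeated across the generated `lib_openshift` modules originates in `roles/lib_openshift/src/lib/base.py` (patched just above), from which those modules are built. The coercion matters because `file.write()` only accepts strings, while some callers hand `_write` a dict or list. A minimal sketch of the fixed behavior, with a hypothetical file name and payload:

```
def file_write(filename, contents):
    '''Actually write the file contents to disk. This helps with mocking.'''
    with open(filename, 'w') as sfd:
        # file.write() raises TypeError for non-string arguments; str()
        # tolerates the dicts and lists some callers pass in.
        sfd.write(str(contents))

file_write('/tmp/route.yaml', {'kind': 'Route'})  # ok; writes "{'kind': 'Route'}"
```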
@@ -13,7 +11,5 @@    delay: 5   register: result   until: result.rc == 0 -  when: > -    (openshift_master_ha | bool) and -    (not master_controllers_service_status_changed | default(false)) +  when: (not master_controllers_service_status_changed | default(false))    ignore_errors: yes diff --git a/roles/nuage_master/tasks/etcd_certificates.yml b/roles/nuage_master/tasks/etcd_certificates.yml new file mode 100644 index 000000000..99ec27f91 --- /dev/null +++ b/roles/nuage_master/tasks/etcd_certificates.yml @@ -0,0 +1,23 @@ +--- +- name: Generate openshift etcd certs +  become: yes +  include_role: +    name: etcd +    tasks_from: client_certificates +  vars: +    etcd_cert_prefix: nuageEtcd- +    etcd_cert_config_dir: "{{ cert_output_dir }}" +    embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}" +    etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}" +    etcd_cert_subdir: "openshift-nuage-{{ openshift.common.hostname }}" + + +- name: Error if etcd certs are not copied +  stat: +    path: "{{ item }}" +  register: nuage_etcd_cert_stat +  failed_when: not nuage_etcd_cert_stat.stat.exists +  with_items: +  - "{{ cert_output_dir }}/nuageEtcd-ca.crt" +  - "{{ cert_output_dir }}/nuageEtcd-client.crt" +  - "{{ cert_output_dir }}/nuageEtcd-client.key" diff --git a/roles/nuage_master/tasks/main.yaml b/roles/nuage_master/tasks/main.yaml index 29e16b6f8..a1781dc56 100644 --- a/roles/nuage_master/tasks/main.yaml +++ b/roles/nuage_master/tasks/main.yaml @@ -81,6 +81,7 @@      - nuage.key      - nuage.kubeconfig +- include_tasks: etcd_certificates.yml  - include_tasks: certificates.yml  - name: Install Nuage VSD user certificate @@ -99,7 +100,16 @@    become: yes    template: src=nuage-node-config-daemonset.j2 dest=/etc/nuage-node-config-daemonset.yaml owner=root mode=0644 -- name: Add the service account to the privileged scc to have root permissions +- name: Create Nuage Infra Pod daemon set yaml file +  become: yes +  template: src=nuage-infra-pod-config-daemonset.j2 dest=/etc/nuage-infra-pod-config-daemonset.yaml owner=root mode=0644 + +- name: Add the service account to the privileged scc to have root permissions for kube-system +  shell: oc adm policy add-scc-to-user privileged system:serviceaccount:kube-system:daemon-set-controller +  ignore_errors: true +  when: inventory_hostname == groups.oo_first_master.0 + +- name: Add the service account to the privileged scc to have root permissions for openshift-infra    shell: oc adm policy add-scc-to-user privileged system:serviceaccount:openshift-infra:daemonset-controller    ignore_errors: true    when: inventory_hostname == groups.oo_first_master.0 @@ -114,6 +124,11 @@    ignore_errors: true    when: inventory_hostname == groups.oo_first_master.0 +- name: Spawn Nuage Infra daemon set pods +  shell: oc create -f /etc/nuage-infra-pod-config-daemonset.yaml +  ignore_errors: true +  when: inventory_hostname == groups.oo_first_master.0 +  - name: Restart daemons    command: /bin/true    notify: diff --git a/roles/nuage_master/templates/nuage-infra-pod-config-daemonset.j2 b/roles/nuage_master/templates/nuage-infra-pod-config-daemonset.j2 new file mode 100755 index 000000000..534a1517f --- /dev/null +++ b/roles/nuage_master/templates/nuage-infra-pod-config-daemonset.j2 @@ -0,0 +1,39 @@ +# This manifest installs the Nuage Infra pod on +# each worker node in an OpenShift cluster. 
+kind: DaemonSet +apiVersion: extensions/v1beta1 +metadata: +  name: nuage-infra-ds +  namespace: kube-system +  labels: +    k8s-app: nuage-infra-ds +spec: +  selector: +    matchLabels: +      k8s-app: nuage-infra-ds +  updateStrategy: +    type: RollingUpdate +  template: +    metadata: +      labels: +        k8s-app: nuage-infra-ds +    spec: +      tolerations: +        - key: node-role.kubernetes.io/master +          effect: NoSchedule +          operator: Exists +      containers: +        # This container spawns a Nuage Infra pod +        # on each worker node +        - name: install-nuage-infra +          image: nuage/infra:{{ nuage_infra_container_image_version }} +          command: ["/install-nuage-infra-pod.sh"] +          securityContext: +            privileged: true +          volumeMounts: +            - mountPath: /var/log +              name: log-dir +      volumes: +        - name: log-dir +          hostPath: +            path: /var/log diff --git a/roles/nuage_master/templates/nuage-master-config-daemonset.j2 b/roles/nuage_master/templates/nuage-master-config-daemonset.j2 index 7be5d6743..3543eeb56 100755 --- a/roles/nuage_master/templates/nuage-master-config-daemonset.j2 +++ b/roles/nuage_master/templates/nuage-master-config-daemonset.j2 @@ -37,11 +37,14 @@ data:        nuageMonServer:            URL: 0.0.0.0:9443            certificateDirectory: {{ nuage_master_crt_dir }} +          clientCA: "" +          serverCertificate: "" +          serverKey: ""        # etcd config required for HA        etcdClientConfig: -          ca: {{ nuage_master_crt_dir }}/nuageMonCA.crt -          certFile: {{ nuage_master_crt_dir }}/nuageMonServer.crt -          keyFile: {{ nuage_master_crt_dir }}/master.etcd-client.key +          ca: {{ nuage_master_crt_dir }}/nuageEtcd-ca.crt +          certFile: {{ nuage_master_crt_dir }}/nuageEtcd-client.crt +          keyFile: {{ nuage_master_crt_dir }}/nuageEtcd-client.key            urls:        {% for etcd_url in openshift.master.etcd_urls %}                - {{ etcd_url }} diff --git a/roles/nuage_master/templates/nuage-node-config-daemonset.j2 b/roles/nuage_master/templates/nuage-node-config-daemonset.j2 index 6a1267d94..996a2d2b0 100755 --- a/roles/nuage_master/templates/nuage-node-config-daemonset.j2 +++ b/roles/nuage_master/templates/nuage-node-config-daemonset.j2 @@ -61,6 +61,8 @@ spec:    selector:      matchLabels:        k8s-app: nuage-cni-ds +  updateStrategy: +    type: RollingUpdate    template:      metadata:        labels: @@ -104,6 +106,8 @@ spec:              - mountPath: /var/log                name: cni-log-dir              - mountPath: {{ nuage_node_config_dsets_mount_dir }} +              name: var-usr-share-dir +            - mountPath: /usr/share/                name: usr-share-dir        volumes:          - name: cni-bin-dir @@ -121,9 +125,12 @@ spec:          - name: cni-log-dir            hostPath:              path: /var/log -        - name: usr-share-dir +        - name: var-usr-share-dir            hostPath:              path: {{ nuage_node_config_dsets_mount_dir }} +        - name: usr-share-dir +          hostPath: +            path: /usr/share/  --- @@ -164,7 +171,7 @@ spec:              - name: NUAGE_PLATFORM                value: '"kvm, k8s"'              - name: NUAGE_K8S_SERVICE_IPV4_SUBNET -              value: '192.168.0.0\/16' +              value: '172.30.0.0\/16'              - name: NUAGE_NETWORK_UPLINK_INTF                value: "eth0"            volumeMounts: diff --git 
a/roles/nuage_master/vars/main.yaml b/roles/nuage_master/vars/main.yaml index 114514d7c..5045e1cc5 100644 --- a/roles/nuage_master/vars/main.yaml +++ b/roles/nuage_master/vars/main.yaml @@ -26,9 +26,10 @@ nuage_master_config_dsets_mount_dir: /usr/share/  nuage_node_config_dsets_mount_dir: /usr/share/  nuage_cni_bin_dsets_mount_dir: /opt/cni/bin  nuage_cni_netconf_dsets_mount_dir: /etc/cni/net.d -nuage_monitor_container_image_version: "{{ nuage_monitor_image_version | default('v5.1.1') }}" -nuage_vrs_container_image_version: "{{ nuage_vrs_image_version | default('v5.1.1') }}" -nuage_cni_container_image_version: "{{ nuage_cni_image_version | default('v5.1.1') }}" +nuage_monitor_container_image_version: "{{ nuage_monitor_image_version | default('v5.2.1') }}" +nuage_vrs_container_image_version: "{{ nuage_vrs_image_version | default('v5.2.1') }}" +nuage_cni_container_image_version: "{{ nuage_cni_image_version | default('v5.2.1') }}" +nuage_infra_container_image_version: "{{ nuage_infra_image_version | default('v5.2.1') }}"  api_server_url: "{{ hostvars[groups.oo_first_master.0].openshift.master.api_url }}"  nuage_vport_mtu: "{{ nuage_interface_mtu | default('1460') }}"  master_host_type: "{{ master_base_host_type | default('is_rhel_server') }}" diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml index 178e0849c..1696c2751 100644 --- a/roles/openshift_aws/defaults/main.yml +++ b/roles/openshift_aws/defaults/main.yml @@ -44,6 +44,8 @@ openshift_aws_s3_bucket_name: "{{ openshift_aws_clusterid }}-docker-registry"  openshift_aws_elb_basename: "{{ openshift_aws_clusterid }}" +openshift_aws_elb_cert_arn: '' +  openshift_aws_elb_dict:    master:      external: @@ -65,7 +67,7 @@ openshift_aws_elb_dict:          load_balancer_port: "{{ openshift_master_api_port | default(8443) }}"          instance_protocol: ssl          instance_port: "{{ openshift_master_api_port | default(8443) }}" -        ssl_certificate_id: '' +        ssl_certificate_id: "{{ openshift_aws_elb_cert_arn }}"        name: "{{ openshift_aws_elb_basename }}-master-external"        tags: "{{ openshift_aws_kube_tags }}"      internal: @@ -320,3 +322,8 @@ openshift_aws_masters_groups: masters,etcd,nodes  # By default, don't delete things like the shared IAM instance  # profile and uploaded ssh keys  openshift_aws_enable_uninstall_shared_objects: False +# S3 bucket names are globally unique, and a released name can take +# minutes or hours to become available for re-use (assuming nobody else +# claims it in the meantime). Default to only emptying the contents of +# the S3 bucket if we've been asked to create the bucket during provisioning. +openshift_aws_really_delete_s3_bucket: False
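The new `openshift_aws_really_delete_s3_bucket` default gates the second half of the uninstall flow defined next in `uninstall_s3.yml`: always empty the bucket we created, but only remove the bucket itself on explicit request. The same flow restated as an illustrative `boto3` sketch, with placeholder bucket name and region:

```
import boto3

def uninstall_s3(bucket_name, region, really_delete_bucket=False):
    # Empty the bucket unconditionally; deleting it would release a
    # globally unique name that can take hours to become claimable again.
    bucket = boto3.resource('s3', region_name=region).Bucket(bucket_name)
    bucket.objects.all().delete()   # "get S3 object list" + "delete S3 objects"
    if really_delete_bucket:
        bucket.delete()             # "delete S3 bucket"

uninstall_s3('mycluster-docker-registry', 'us-east-1')  # hypothetical values
```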
diff --git a/roles/openshift_aws/tasks/uninstall_s3.yml b/roles/openshift_aws/tasks/uninstall_s3.yml new file mode 100644 index 000000000..0b08cbeed --- /dev/null +++ b/roles/openshift_aws/tasks/uninstall_s3.yml @@ -0,0 +1,26 @@ +--- +- name: empty S3 bucket +  block: +  - name: get S3 object list +    aws_s3: +      bucket: "{{ openshift_aws_s3_bucket_name }}" +      mode: list +      region: "{{ openshift_aws_region }}" +    register: s3_out + +  - name: delete S3 objects +    aws_s3: +      bucket: "{{ openshift_aws_s3_bucket_name }}" +      mode: delobj +      object: "{{ item }}" +    with_items: "{{ s3_out.s3_keys }}" +  when: openshift_aws_create_s3 | bool + +- name: delete S3 bucket +  aws_s3: +    bucket: "{{ openshift_aws_s3_bucket_name }}" +    mode: delete +    region: "{{ openshift_aws_region }}" +  when: +  - openshift_aws_create_s3 | bool +  - openshift_aws_really_delete_s3_bucket | bool diff --git a/roles/openshift_certificate_expiry/examples/playbooks b/roles/openshift_certificate_expiry/examples/playbooks index 586afb0d5..751c3d14e 120000 --- a/roles/openshift_certificate_expiry/examples/playbooks +++ b/roles/openshift_certificate_expiry/examples/playbooks @@ -1 +1 @@ -../../../playbooks/certificate_expiry
\ No newline at end of file +../../../playbooks/openshift-checks/certificate_expiry
\ No newline at end of file diff --git a/roles/openshift_cloud_provider/defaults/main.yml b/roles/openshift_cloud_provider/defaults/main.yml new file mode 100644 index 000000000..37cbf5603 --- /dev/null +++ b/roles/openshift_cloud_provider/defaults/main.yml @@ -0,0 +1,4 @@ +--- +openshift_gcp_project: '' +openshift_gcp_prefix: '' +openshift_gcp_network_name: "{{ openshift_gcp_prefix }}network" diff --git a/roles/openshift_cloud_provider/tasks/gce.yml b/roles/openshift_cloud_provider/tasks/gce.yml index 395bd304c..9e1c31b1d 100644 --- a/roles/openshift_cloud_provider/tasks/gce.yml +++ b/roles/openshift_cloud_provider/tasks/gce.yml @@ -1,4 +1,12 @@  --- +- name: check variables are passed +  fail: +    msg: "Ensure correct variables are defined for gcp. {{ item }}" +  when: item == '' +  with_items: +  - "{{ openshift_gcp_project }}" +  - "{{ openshift_gcp_prefix }}" +  # Work around ini_file create option in 2.2 which defaults to no  - name: Create cloud config file    file: @@ -16,8 +24,8 @@      option: "{{ item.key }}"      value: "{{ item.value }}"    with_items: -    - { key: 'project-id', value: '{{ openshift_gcp_project }}' } -    - { key: 'network-name', value: '{{ openshift_gcp_network_name }}' } -    - { key: 'node-tags', value: '{{ openshift_gcp_prefix }}ocp' } -    - { key: 'node-instance-prefix', value: '{{ openshift_gcp_prefix }}' } -    - { key: 'multizone', value: 'false' } +  - { key: 'project-id', value: '{{ openshift_gcp_project }}' } +  - { key: 'network-name', value: '{{ openshift_gcp_network_name }}' } +  - { key: 'node-tags', value: '{{ openshift_gcp_prefix }}ocp' } +  - { key: 'node-instance-prefix', value: '{{ openshift_gcp_prefix }}' } +  - { key: 'multizone', value: 'false' } diff --git a/roles/openshift_default_storage_class/defaults/main.yml b/roles/openshift_default_storage_class/defaults/main.yml index 014c06641..687d60171 100644 --- a/roles/openshift_default_storage_class/defaults/main.yml +++ b/roles/openshift_default_storage_class/defaults/main.yml @@ -1,4 +1,7 @@  --- +# Must not be blank if you're using vsphere +openshift_cloudprovider_vsphere_datacenter: '' +  openshift_storageclass_defaults:    aws:      provisioner: aws-ebs @@ -19,6 +22,12 @@ openshift_storageclass_defaults:      parameters:        fstype: xfs +  vsphere: +    provisioner: vsphere-volume +    name: standard +    parameters: +      datastore: "{{ openshift_cloudprovider_vsphere_datacenter }}" +  openshift_storageclass_default: "true"  openshift_storageclass_name: "{{ openshift_storageclass_defaults[openshift_cloudprovider_kind]['name'] }}"  openshift_storageclass_provisioner: "{{ openshift_storageclass_defaults[openshift_cloudprovider_kind]['provisioner'] }}" diff --git a/roles/openshift_hosted/tasks/storage/glusterfs.yml b/roles/openshift_hosted/tasks/storage/glusterfs.yml index b39c44b01..7223a5afe 100644 --- a/roles/openshift_hosted/tasks/storage/glusterfs.yml +++ b/roles/openshift_hosted/tasks/storage/glusterfs.yml @@ -35,7 +35,7 @@    mount:      state: mounted      fstype: glusterfs -    src: "{% if 'glusterfs_registry' in groups %}{% set node = groups.glusterfs_registry[0] %}{% elif 'glusterfs' in groups %}{% set node = groups.glusterfs[0] %}{% endif %}{% if openshift_hosted_registry_storage_glusterfs_ips is defined and openshift_hosted_registry_storage_glusterfs_ips|length > 0 %}{{ openshift_hosted_registry_storage_glusterfs_ips[0] }}{% elif 'glusterfs_hostname' in hostvars[node] %}{{ hostvars[node].glusterfs_hostname }}{% elif 'openshift' in hostvars[node] %}{{ 
hostvars[node].openshift.node.nodename }}{% else %}{{ node }}{% endif %}:/{{ openshift_hosted_registry_storage_glusterfs_path }}" +    src: "{% if 'glusterfs_registry' in groups and groups['glusterfs_registry'] | length > 0  %}{% set node = groups.glusterfs_registry[0] %}{% elif 'glusterfs' in groups and groups['glusterfs'] | length > 0 %}{% set node = groups.glusterfs[0] %}{% endif %}{% if openshift_hosted_registry_storage_glusterfs_ips is defined and openshift_hosted_registry_storage_glusterfs_ips|length > 0 %}{{ openshift_hosted_registry_storage_glusterfs_ips[0] }}{% elif 'glusterfs_hostname' in hostvars[node] %}{{ hostvars[node].glusterfs_hostname }}{% elif 'openshift' in hostvars[node] %}{{ hostvars[node].openshift.node.nodename }}{% else %}{{ node }}{% endif %}:/{{ openshift.hosted.registry.storage.glusterfs.path }}"      name: "{{ mktemp.stdout }}"  - name: Set registry volume permissions diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md index a192bd67e..c438236a4 100644 --- a/roles/openshift_logging/README.md +++ b/roles/openshift_logging/README.md @@ -58,6 +58,7 @@ When `openshift_logging_install_logging` is set to `False` the `openshift_loggin  - `openshift_logging_kibana_replica_count`: The number of replicas Kibana should be scaled up to. Defaults to 1.  - `openshift_logging_kibana_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"} to select the nodes where the pod will land.  - `openshift_logging_kibana_edge_term_policy`: Insecure Edge Termination Policy. Defaults to Redirect. +- `openshift_logging_kibana_env_vars`: A map of environment variables to add to the kibana deployment config (e.g. {"ELASTICSEARCH_REQUESTTIMEOUT":"30000"})  - `openshift_logging_fluentd_nodeselector`: The node selector that the Fluentd daemonset uses to determine where to deploy to. Defaults to '"logging-infra-fluentd": "true"'.  - `openshift_logging_fluentd_cpu_request`: The minimum amount of CPU to allocate for Fluentd collector pods. Defaults to '100m'. 
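The `openshift_logging_kibana_env_vars` entry documented above feeds a loop added to `kibana.j2` later in this diff. Rendering just that loop standalone (assuming the `jinja2` package is installed) shows what the map expands to in the container spec:

```
from jinja2 import Template

# The same for-loop the patched kibana.j2 template uses for env entries.
env_loop = Template(
    '{% for key, value in kibana_env_vars.items() %}'
    '- name: "{{ key }}"\n'
    '  value: "{{ value }}"\n'
    '{% endfor %}'
)

print(env_loop.render(kibana_env_vars={'ELASTICSEARCH_REQUESTTIMEOUT': '30000'}))
# - name: "ELASTICSEARCH_REQUESTTIMEOUT"
#   value: "30000"
```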
diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml index ced7397b5..6be47b1f8 100644 --- a/roles/openshift_logging/tasks/delete_logging.yaml +++ b/roles/openshift_logging/tasks/delete_logging.yaml @@ -140,4 +140,6 @@      console_config_edits:        - key: clusterInfo#loggingPublicURL          value: "" -  when: openshift_web_console_install | default(true) | bool +  when: +    - openshift_web_console_install | default(true) | bool +    - openshift.common.version_gte_3_9 diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml index e4883bfa0..c905502ac 100644 --- a/roles/openshift_logging/tasks/install_logging.yaml +++ b/roles/openshift_logging/tasks/install_logging.yaml @@ -71,10 +71,17 @@  - set_fact: openshift_logging_es_pvc_prefix="logging-es"    when: openshift_logging_es_pvc_prefix == "" +# Use conditional_set_fact here: with a plain set_fact, default() in the task +# below received "" instead of undefined, which made us evaluate +# openshift_logging_elasticsearch_storage_type incorrectly +- conditional_set_fact: +    facts: "{{ hostvars[inventory_hostname] }}" +    vars: +      elasticsearch_storage_type: openshift_logging_elasticsearch_storage_type +  - set_fact: -    elasticsearch_storage_type: "{{ openshift_logging_elasticsearch_storage_type | default('pvc' if ( openshift_logging_es_pvc_dynamic | bool or openshift_hosted_logging_storage_kind | default('') == 'nfs' or openshift_logging_es_pvc_size | length > 0)  else 'emptydir') }}" +    default_elasticsearch_storage_type: "{{ 'pvc' if ( openshift_logging_es_pvc_dynamic | bool or openshift_logging_storage_kind | default('') == 'nfs' or openshift_logging_es_pvc_size | length > 0)  else 'emptydir' }}" -# We don't allow scaling down of ES nodes currently  - include_role:      name: openshift_logging_elasticsearch    vars: @@ -85,7 +92,8 @@      openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}"      openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}" -    openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}" +    openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type | default('pvc' if outer_item.0.volumes['elasticsearch-storage'].persistentVolumeClaim is defined else 'hostmount' if outer_item.0.volumes['elasticsearch-storage'].hostPath is defined else 'emptydir' if outer_item.0.volumes['elasticsearch-storage'].emptyDir is defined else default_elasticsearch_storage_type) }}" +    openshift_logging_elasticsearch_hostmount_path: "{{ outer_item.0.volumes['elasticsearch-storage'].hostPath.path if outer_item.0.volumes['elasticsearch-storage'].hostPath is defined else '' }}"      openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"      openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name | default() }}"      openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_nodeselector if outer_item.0.nodeSelector | default(None) is none else outer_item.0.nodeSelector }}" @@ -112,7 +120,7 @@      openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}"      openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}" -    openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}" +    
openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type | default(default_elasticsearch_storage_type) }}"      openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"      openshift_logging_elasticsearch_pvc_storage_class_name: "{{ openshift_logging_es_pvc_storage_class_name | default() }}" @@ -133,7 +141,7 @@    when: openshift_logging_es_ops_pvc_prefix == ""  - set_fact: -    elasticsearch_storage_type: "{{ openshift_logging_elasticsearch_storage_type | default('pvc' if ( openshift_logging_es_ops_pvc_dynamic | bool or openshift_hosted_logging_storage_kind | default('') == 'nfs' or openshift_logging_es_ops_pvc_size | length > 0)  else 'emptydir') }}" +    default_elasticsearch_storage_type: "{{ 'pvc' if ( openshift_logging_es_ops_pvc_dynamic | bool or openshift_logging_storage_kind | default('') == 'nfs' or openshift_logging_es_ops_pvc_size | length > 0)  else 'emptydir' }}"    when:    - openshift_logging_use_ops | bool @@ -147,7 +155,8 @@      openshift_logging_elasticsearch_ops_deployment: true      openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}" -    openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}" +    openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type | default('pvc' if outer_item.0.volumes['elasticsearch-storage'].persistentVolumeClaim is defined else 'hostmount' if outer_item.0.volumes['elasticsearch-storage'].hostPath is defined else 'emptydir' if outer_item.0.volumes['elasticsearch-storage'].emptyDir is defined else default_elasticsearch_storage_type) }}" +    openshift_logging_elasticsearch_hostmount_path: "{{ outer_item.0.volumes['elasticsearch-storage'].hostPath.path if outer_item.0.volumes['elasticsearch-storage'].hostPath is defined else '' }}"      openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"      openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic }}"      openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}" @@ -189,7 +198,7 @@      openshift_logging_elasticsearch_ops_deployment: true      openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}" -    openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}" +    openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type | default(default_elasticsearch_storage_type) }}"      openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"      openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic }}"      openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}" @@ -326,4 +335,6 @@      console_config_edits:      - key: clusterInfo#loggingPublicURL        value: "https://{{ openshift_logging_kibana_hostname }}" -  when: openshift_web_console_install | default(true) | bool +  when: +  - openshift_web_console_install | default(true) | bool +  - openshift.common.version_gte_3_9 diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml index ff5ad1045..b731d93a0 100644 --- a/roles/openshift_logging_elasticsearch/tasks/main.yaml +++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml @@ -137,6 +137,16 @@      - "prometheus_out.stderr | length > 0"      - "'already exists' not in prometheus_out.stderr" +- set_fact: +    
_logging_metrics_proxy_passwd: "{{ 16 | lib_utils_oo_random_word | b64encode }}" + +- template: +    src: passwd.j2 +    dest: "{{mktemp.stdout}}/passwd.yml" +  vars: +    logging_user_name: "{{ openshift_logging_elasticsearch_prometheus_sa }}" +    logging_user_passwd: "{{ _logging_metrics_proxy_passwd }}" +  # View role and binding  - name: Generate logging-elasticsearch-view-role    template: @@ -255,6 +265,8 @@          path: "{{ generated_certs_dir }}/ca.crt"        - name: admin.jks          path: "{{ generated_certs_dir }}/system.admin.jks" +      - name: passwd.yml +        path: "{{mktemp.stdout}}/passwd.yml"  # services  - name: Set logging-{{ es_component }}-cluster service @@ -391,6 +403,7 @@      es_container_security_context: "{{ _es_containers.elasticsearch.securityContext if _es_containers is defined and 'elasticsearch' in _es_containers and 'securityContext' in _es_containers.elasticsearch else None }}"      deploy_type: "{{ openshift_logging_elasticsearch_deployment_type }}"      es_replicas: 1 +    basic_auth_passwd: "{{ _logging_metrics_proxy_passwd | b64decode }}"  - name: Set ES dc    oc_obj: diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2 index 4b189f255..b1d6a4489 100644 --- a/roles/openshift_logging_elasticsearch/templates/es.j2 +++ b/roles/openshift_logging_elasticsearch/templates/es.j2 @@ -51,6 +51,7 @@ spec:             - -client-id={{openshift_logging_elasticsearch_prometheus_sa}}             - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token             - -cookie-secret={{ 16 | lib_utils_oo_random_word | b64encode }} +           - -basic-auth-password={{ basic_auth_passwd }}             - -upstream=https://localhost:9200             - '-openshift-sar={"namespace": "{{ openshift_logging_elasticsearch_namespace}}", "verb": "view", "resource": "prometheus", "group": "metrics.openshift.io"}'             - '-openshift-delegate-urls={"/": {"resource": "prometheus", "verb": "view", "group": "metrics.openshift.io", "namespace": "{{ openshift_logging_elasticsearch_namespace}}"}}' diff --git a/roles/openshift_logging_elasticsearch/templates/passwd.j2 b/roles/openshift_logging_elasticsearch/templates/passwd.j2 new file mode 100644 index 000000000..a22151eef --- /dev/null +++ b/roles/openshift_logging_elasticsearch/templates/passwd.j2 @@ -0,0 +1,2 @@ +"{{logging_user_name}}": +  passwd: "{{logging_user_passwd}}" diff --git a/roles/openshift_logging_kibana/defaults/main.yml b/roles/openshift_logging_kibana/defaults/main.yml index 899193838..b69cbacae 100644 --- a/roles/openshift_logging_kibana/defaults/main.yml +++ b/roles/openshift_logging_kibana/defaults/main.yml @@ -18,6 +18,9 @@ openshift_logging_kibana_es_port: 9200  openshift_logging_kibana_replicas: 1  openshift_logging_kibana_edge_term_policy: Redirect +# map of env. 
var to add to the kibana deploymentconfig +openshift_logging_kibana_env_vars: {} +  # this is used to determine if this is an operations deployment or a non-ops deployment  # simply used for naming purposes  openshift_logging_kibana_ops_deployment: false diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml index 3c3bd902e..c67235c62 100644 --- a/roles/openshift_logging_kibana/tasks/main.yaml +++ b/roles/openshift_logging_kibana/tasks/main.yaml @@ -251,6 +251,7 @@      kibana_proxy_memory_limit: "{{ openshift_logging_kibana_proxy_memory_limit }}"      kibana_replicas: "{{ openshift_logging_kibana_replicas | default (1) }}"      kibana_node_selector: "{{ openshift_logging_kibana_nodeselector | default({}) }}" +    kibana_env_vars: "{{ openshift_logging_kibana_env_vars | default({}) }}"  - name: Set Kibana DC    oc_obj: diff --git a/roles/openshift_logging_kibana/templates/kibana.j2 b/roles/openshift_logging_kibana/templates/kibana.j2 index 57d216373..ed05b8458 100644 --- a/roles/openshift_logging_kibana/templates/kibana.j2 +++ b/roles/openshift_logging_kibana/templates/kibana.j2 @@ -70,6 +70,10 @@ spec:                  resourceFieldRef:                    containerName: kibana                    resource: limits.memory +{% for key, value in kibana_env_vars.items() %} +            - name: "{{ key }}" +              value: "{{ value }}" +{% endfor %}            volumeMounts:              - name: kibana                mountPath: /etc/kibana/keys diff --git a/roles/openshift_manage_node/defaults/main.yml b/roles/openshift_manage_node/defaults/main.yml index 00e04b9f2..b7a89a723 100644 --- a/roles/openshift_manage_node/defaults/main.yml +++ b/roles/openshift_manage_node/defaults/main.yml @@ -1,9 +1,5 @@  ---  # openshift_manage_node_is_master is set at the play level.  openshift_manage_node_is_master: False - -# Default is to be schedulable except for master nodes. 
-l_openshift_manage_schedulable: "{{ openshift_schedulable | default(not openshift_manage_node_is_master) }}" -  openshift_master_node_labels:    node-role.kubernetes.io/master: 'true' diff --git a/roles/openshift_manage_node/tasks/config.yml b/roles/openshift_manage_node/tasks/config.yml index 4f00351b5..e5753d185 100644 --- a/roles/openshift_manage_node/tasks/config.yml +++ b/roles/openshift_manage_node/tasks/config.yml @@ -2,7 +2,7 @@  - name: Set node schedulability    oc_adm_manage_node:      node: "{{ openshift.node.nodename | lower }}" -    schedulable: "{{ 'true' if l_openshift_manage_schedulable | bool else 'false' }}" +    schedulable: "{{ 'true' if openshift_schedulable | default(true) | bool else 'false' }}"    retries: 10    delay: 5    register: node_schedulable @@ -23,5 +23,5 @@    delegate_to: "{{ openshift_master_host }}"    vars:      l_node_labels: "{{ openshift_node_labels | default({}) }}" -    l_master_labels: "{{ ('oo_masters_to_config' in group_names) | ternary(openshift_master_node_labels, {}) }}" +    l_master_labels: "{{ openshift_manage_node_is_master | ternary(openshift_master_node_labels, {}) }}"      l_all_labels: "{{ l_node_labels | combine(l_master_labels) }}" diff --git a/roles/openshift_management/defaults/main.yml b/roles/openshift_management/defaults/main.yml index b5e234b7f..57bc97e3e 100644 --- a/roles/openshift_management/defaults/main.yml +++ b/roles/openshift_management/defaults/main.yml @@ -15,6 +15,8 @@ openshift_management_pod_rollout_retries: 30  #  # Choose 'miq-template' for a podified database install  # Choose 'miq-template-ext-db' for an external database install +# TODO: Swap this var declaration once CFME is fully supported +#openshift_management_app_template: "{{ 'cfme-template' if openshift_deployment_type == 'openshift-enterprise' else 'miq-template' }}"  openshift_management_app_template: miq-template  # If you are using the miq-template-ext-db template then you must add  # the required database parameters to the diff --git a/roles/openshift_management/tasks/accounts.yml b/roles/openshift_management/tasks/accounts.yml index e45ea8d43..80318fec0 100644 --- a/roles/openshift_management/tasks/accounts.yml +++ b/roles/openshift_management/tasks/accounts.yml @@ -5,14 +5,14 @@    oc_serviceaccount:      namespace: "{{ openshift_management_project }}"      state: present -    name: "{{ openshift_management_flavor_short }}{{ item.name }}" +    name: "{{ __openshift_management_flavor_short }}{{ item.name }}"    with_items:      - "{{ __openshift_system_account_sccs }}"  - name: Ensure the CFME system accounts have all the required SCCs    oc_adm_policy_user:      namespace: "{{ openshift_management_project }}" -    user: "system:serviceaccount:{{ openshift_management_project }}:{{ openshift_management_flavor_short }}{{ item.name }}" +    user: "system:serviceaccount:{{ openshift_management_project }}:{{ __openshift_management_flavor_short }}{{ item.name }}"      resource_kind: scc      resource_name: "{{ item.resource_name }}"    with_items: @@ -21,7 +21,7 @@  - name: Ensure the CFME system accounts have the required roles    oc_adm_policy_user:      namespace: "{{ openshift_management_project }}" -    user: "system:serviceaccount:{{ openshift_management_project }}:{{ openshift_management_flavor_short }}{{ item.name }}" +    user: "system:serviceaccount:{{ openshift_management_project }}:{{ __openshift_management_flavor_short }}{{ item.name }}"      resource_kind: role      resource_name: "{{ item.resource_name }}"    with_items: diff --git 
a/roles/openshift_management/tasks/main.yml b/roles/openshift_management/tasks/main.yml index c4b204b98..5209eba56 100644 --- a/roles/openshift_management/tasks/main.yml +++ b/roles/openshift_management/tasks/main.yml @@ -71,15 +71,15 @@  # CREATE APP  - name: Note the correct ext-db template name    set_fact: -    openshift_management_template_name: "{{ openshift_management_flavor }}-ext-db" +    openshift_management_template_name: "{{ __openshift_management_flavor }}-ext-db"    when: -    - openshift_management_app_template in ['miq-template-ext-db', 'cfme-template-ext-db'] +    - __openshift_management_use_ext_db  - name: Note the correct podified db template name    set_fact: -    openshift_management_template_name: "{{ openshift_management_flavor }}" +    openshift_management_template_name: "{{ __openshift_management_flavor }}"    when: -    - openshift_management_app_template in ['miq-template', 'cfme-template'] +    - not __openshift_management_use_ext_db  - name: Ensure the Management App is created    oc_process: @@ -89,7 +89,7 @@      params: "{{ openshift_management_template_parameters }}"  - name: Wait for the app to come up. May take several minutes, 30s check intervals, {{ openshift_management_pod_rollout_retries }} retries -  command: "oc logs {{ openshift_management_flavor }}-0 -n {{ openshift_management_project }}" +  command: "oc logs {{ __openshift_management_flavor }}-0 -n {{ openshift_management_project }}"    register: app_seeding_logs    until: app_seeding_logs.stdout.find('Server starting complete') != -1    delay: 30 diff --git a/roles/openshift_management/tasks/storage/create_nfs_pvs.yml b/roles/openshift_management/tasks/storage/create_nfs_pvs.yml index d1b9a8d5c..1f8cac6c6 100644 --- a/roles/openshift_management/tasks/storage/create_nfs_pvs.yml +++ b/roles/openshift_management/tasks/storage/create_nfs_pvs.yml @@ -12,7 +12,7 @@    when:      - openshift_management_template_parameters.APPLICATION_VOLUME_CAPACITY is not defined -- when: openshift_management_app_template in ['miq-template', 'cfme-template'] +- when: not __openshift_management_use_ext_db    block:      - name: Note the DB PV Size from Template Parameters        set_fact: @@ -31,7 +31,7 @@      namespace: "{{ openshift_management_project }}"      state: list      kind: pv -    name: "{{ openshift_management_flavor_short }}-app" +    name: "{{ __openshift_management_flavor_short }}-app"    register: miq_app_pv_check  - name: Check if the Management DB PV has been created @@ -39,15 +39,15 @@      namespace: "{{ openshift_management_project }}"      state: list      kind: pv -    name: "{{ openshift_management_flavor_short }}-db" +    name: "{{ __openshift_management_flavor_short }}-db"    register: miq_db_pv_check    when: -    - openshift_management_app_template in ['miq-template', 'cfme-template'] +    - not __openshift_management_use_ext_db  - name: Ensure the Management App PV is created    oc_process:      namespace: "{{ openshift_management_project }}" -    template_name: "{{ openshift_management_flavor }}-app-pv" +    template_name: "{{ __openshift_management_flavor }}-app-pv"      create: True      params:        PV_SIZE: "{{ openshift_management_app_pv_size }}" @@ -58,12 +58,12 @@  - name: Ensure the Management DB PV is created    oc_process:      namespace: "{{ openshift_management_project }}" -    template_name: "{{ openshift_management_flavor }}-db-pv" +    template_name: "{{ __openshift_management_flavor }}-db-pv"      create: True      params:        PV_SIZE: "{{ 
openshift_management_db_pv_size }}"        BASE_PATH: "{{ openshift_management_storage_nfs_base_dir }}"        NFS_HOST: "{{ openshift_management_nfs_server }}"    when: -    - openshift_management_app_template in ['miq-template', 'cfme-template'] +    - not __openshift_management_use_ext_db      - miq_db_pv_check.results.results == [{}] diff --git a/roles/openshift_management/tasks/storage/nfs.yml b/roles/openshift_management/tasks/storage/nfs.yml index 9e3a4d43a..4a00efb1d 100644 --- a/roles/openshift_management/tasks/storage/nfs.yml +++ b/roles/openshift_management/tasks/storage/nfs.yml @@ -17,8 +17,8 @@          tasks_from: create_export        vars:          l_nfs_base_dir: "{{ openshift_management_storage_nfs_base_dir }}" -        l_nfs_export_config: "{{ openshift_management_flavor_short }}" -        l_nfs_export_name: "{{ openshift_management_flavor_short }}-app" +        l_nfs_export_config: "{{ __openshift_management_flavor_short }}" +        l_nfs_export_name: "{{ __openshift_management_flavor_short }}-app"          l_nfs_options: "*(rw,no_root_squash,no_wdelay)"      - name: Create the DB export @@ -27,10 +27,10 @@          tasks_from: create_export        vars:          l_nfs_base_dir: "{{ openshift_management_storage_nfs_base_dir }}" -        l_nfs_export_config: "{{ openshift_management_flavor_short }}" -        l_nfs_export_name: "{{ openshift_management_flavor_short }}-db" +        l_nfs_export_config: "{{ __openshift_management_flavor_short }}" +        l_nfs_export_name: "{{ __openshift_management_flavor_short }}-db"          l_nfs_options: "*(rw,no_root_squash,no_wdelay)"        when: -        - openshift_management_app_template in ['miq-template', 'cfme-template'] +        - not __openshift_management_use_ext_db    delegate_to: "{{ openshift_management_nfs_server }}" diff --git a/roles/openshift_management/tasks/template.yml b/roles/openshift_management/tasks/template.yml index 9f97cdcb9..f40af7349 100644 --- a/roles/openshift_management/tasks/template.yml +++ b/roles/openshift_management/tasks/template.yml @@ -13,59 +13,59 @@  ######################################################################  # STANDARD PODIFIED DATABASE TEMPLATE -- when: openshift_management_app_template in ['miq-template', 'cfme-template'] +- when: not __openshift_management_use_ext_db    block:    - name: Check if the Management Server template has been created already      oc_obj:        namespace: "{{ openshift_management_project }}"        state: list        kind: template -      name: "{{ openshift_management_flavor }}" +      name: "{{ __openshift_management_flavor }}"      register: miq_server_check    - when: miq_server_check.results.results == [{}]      block:      - name: Copy over Management Server template        copy: -        src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-template.yaml" +        src: "templates/{{ __openshift_management_flavor }}/{{ __openshift_management_flavor_short }}-template.yaml"          dest: "{{ template_dir }}/"      - name: Ensure Management Server Template is created        oc_obj:          namespace: "{{ openshift_management_project }}" -        name: "{{ openshift_management_flavor }}" +        name: "{{ __openshift_management_flavor }}"          state: present          kind: template          files: -        - "{{ template_dir }}/{{ openshift_management_flavor_short }}-template.yaml" +        - "{{ template_dir }}/{{ __openshift_management_flavor_short }}-template.yaml"  
######################################################################  # EXTERNAL DATABASE TEMPLATE -- when: openshift_management_app_template in ['miq-template-ext-db', 'cfme-template-ext-db'] +- when: __openshift_management_use_ext_db    block:    - name: Check if the Management Ext-DB Server template has been created already      oc_obj:        namespace: "{{ openshift_management_project }}"        state: list        kind: template -      name: "{{ openshift_management_flavor }}-ext-db" +      name: "{{ __openshift_management_flavor }}-ext-db"      register: miq_ext_db_server_check    - when: miq_ext_db_server_check.results.results == [{}]      block:      - name: Copy over Management Ext-DB Server template        copy: -        src: "templates/{{ openshift_management_flavor }}/{{openshift_management_flavor_short}}-template-ext-db.yaml" +        src: "templates/{{ __openshift_management_flavor }}/{{__openshift_management_flavor_short}}-template-ext-db.yaml"          dest: "{{ template_dir }}/"      - name: Ensure Management Ext-DB Server Template is created        oc_obj:          namespace: "{{ openshift_management_project }}" -        name: "{{ openshift_management_flavor }}-ext-db" +        name: "{{ __openshift_management_flavor }}-ext-db"          state: present          kind: template          files: -        - "{{ template_dir }}/{{ openshift_management_flavor_short }}-template-ext-db.yaml" +        - "{{ template_dir }}/{{ __openshift_management_flavor_short }}-template-ext-db.yaml"  # End app template creation.  ###################################################################### @@ -79,50 +79,50 @@      namespace: "{{ openshift_management_project }}"      state: list      kind: template -    name: "{{ openshift_management_flavor }}-app-pv" +    name: "{{ __openshift_management_flavor }}-app-pv"    register: miq_app_pv_check  - when: miq_app_pv_check.results.results == [{}]    block:    - name: Copy over Management App PV template      copy: -      src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-pv-server-example.yaml" +      src: "templates/{{ __openshift_management_flavor }}/{{ __openshift_management_flavor_short }}-pv-server-example.yaml"        dest: "{{ template_dir }}/"    - name: Ensure Management App PV Template is created      oc_obj:        namespace: "{{ openshift_management_project }}" -      name: "{{ openshift_management_flavor }}-app-pv" +      name: "{{ __openshift_management_flavor }}-app-pv"        state: present        kind: template        files: -      - "{{ template_dir }}/{{ openshift_management_flavor_short }}-pv-server-example.yaml" +      - "{{ template_dir }}/{{ __openshift_management_flavor_short }}-pv-server-example.yaml"  #---------------------------------------------------------------------  # Required for database if the installation is fully podified -- when: openshift_management_app_template in ['miq-template', 'cfme-template'] +- when: not __openshift_management_use_ext_db    block:    - name: Check if the Management DB PV template has been created already      oc_obj:        namespace: "{{ openshift_management_project }}"        state: list        kind: template -      name: "{{ openshift_management_flavor }}-db-pv" +      name: "{{ __openshift_management_flavor }}-db-pv"      register: miq_db_pv_check    - when: miq_db_pv_check.results.results == [{}]      block:      - name: Copy over Management DB PV template        copy: -        src: "templates/{{ openshift_management_flavor }}/{{ 
openshift_management_flavor_short }}-pv-db-example.yaml" +        src: "templates/{{ __openshift_management_flavor }}/{{ __openshift_management_flavor_short }}-pv-db-example.yaml"          dest: "{{ template_dir }}/"      - name: Ensure Management DB PV Template is created        oc_obj:          namespace: "{{ openshift_management_project }}" -        name: "{{ openshift_management_flavor }}-db-pv" +        name: "{{ __openshift_management_flavor }}-db-pv"          state: present          kind: template          files: -        - "{{ template_dir }}/{{ openshift_management_flavor_short }}-pv-db-example.yaml" +        - "{{ template_dir }}/{{ __openshift_management_flavor_short }}-pv-db-example.yaml" diff --git a/roles/openshift_management/tasks/validate.yml b/roles/openshift_management/tasks/validate.yml index b22f36a4f..2dc895190 100644 --- a/roles/openshift_management/tasks/validate.yml +++ b/roles/openshift_management/tasks/validate.yml @@ -100,4 +100,4 @@        'openshift_management_template_parameters'"    with_items: "{{ __openshift_management_required_db_conn_params }}"    when: -    - openshift_management_app_template in ['miq-template-ext-db', 'cfme-template-ext-db'] +    - __openshift_management_use_ext_db diff --git a/roles/openshift_management/vars/main.yml b/roles/openshift_management/vars/main.yml index da3ad0af7..d7b18df3a 100644 --- a/roles/openshift_management/vars/main.yml +++ b/roles/openshift_management/vars/main.yml @@ -30,14 +30,18 @@ __openshift_management_db_parameters:    - DATABASE_PORT    - DATABASE_NAME -# # Commented out until we can support both CFME and MIQ -# # openshift_management_flavor: "{{ 'cloudforms' if openshift_deployment_type == 'openshift-enterprise' else 'manageiq' }}" -#openshift_management_flavor: cloudforms -openshift_management_flavor: manageiq -# TODO: Make this conditional as well based on the prior variable -# # openshift_management_flavor_short: "{{ 'cfme' if openshift_deployment_type == 'openshift-enterprise' else 'miq' }}" -# openshift_management_flavor_short: cfme -openshift_management_flavor_short: miq +__openshift_management_flavors: +  miq: +    short: miq +    long: manageiq +  cfme: +    short: cfme +    long: cloudforms + +__openshift_management_flavor: "{{ __openshift_management_flavors[openshift_management_app_template.split('-')[0]]['long'] }}" +__openshift_management_flavor_short: "{{ __openshift_management_flavors[openshift_management_app_template.split('-')[0]]['short'] }}" + +__openshift_management_use_ext_db: "{{ true if 'ext-db' in openshift_management_app_template else false }}"  ######################################################################  # ACCOUNTING diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml index b12a6b346..680e4a4ff 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -137,17 +137,8 @@    - item.clientCA | default('') != ''    with_items: "{{ openshift.master.identity_providers }}" -# This is an ugly hack to verify settings are in a file without modifying them with lineinfile. -# The template file will stomp any other settings made. 
-- block: -  - name: check whether our docker-registry setting exists in the env file -    command: "awk '/^OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000/' /etc/sysconfig/{{ openshift_service_type }}-master" -    failed_when: false -    changed_when: false -    register: l_already_set - -  - set_fact: -      openshift_push_via_dns: "{{ openshift.common.version_gte_3_6 or (l_already_set.stdout is defined and l_already_set.stdout is match('OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000')) }}" +- name: Include push_via_dns.yml +  include_tasks: push_via_dns.yml  - name: Set fact of all etcd host IPs    openshift_facts: @@ -227,7 +218,7 @@  - pause:      seconds: 15    when: -  - openshift.master.ha | bool +  - openshift_master_ha | bool  - name: Start and enable master api all masters    systemd: diff --git a/roles/openshift_master/tasks/push_via_dns.yml b/roles/openshift_master/tasks/push_via_dns.yml new file mode 100644 index 000000000..c5876130a --- /dev/null +++ b/roles/openshift_master/tasks/push_via_dns.yml @@ -0,0 +1,13 @@ +--- +# This is an ugly hack to verify settings are in a file without modifying them with lineinfile. +# The template file will stomp any other settings made. +- when: openshift_push_via_dns is not defined +  block: +  - name: check whether our docker-registry setting exists in the env file +    shell: "awk '/^OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000/' /etc/sysconfig/{{ openshift_service_type }}-master*" +    failed_when: false +    changed_when: false +    register: l_already_set + +  - set_fact: +      openshift_push_via_dns: "{{ openshift.common.version_gte_3_6 or (l_already_set.stdout is defined and l_already_set.stdout is match('OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000')) }}" diff --git a/roles/openshift_master/tasks/restart.yml b/roles/openshift_master/tasks/restart.yml index 715347101..f7697067a 100644 --- a/roles/openshift_master/tasks/restart.yml +++ b/roles/openshift_master/tasks/restart.yml @@ -3,7 +3,6 @@    service:      name: "{{ openshift_service_type }}-master-api"      state: restarted -  when: openshift_master_ha | bool  - name: Wait for master API to come back online    wait_for:      host: "{{ openshift.common.hostname }}" @@ -11,12 +10,10 @@      delay: 10      port: "{{ openshift.master.api_port }}"      timeout: 600 -  when: openshift_master_ha | bool -- name: Restart master controllers -  service: -    name: "{{ openshift_service_type }}-master-controllers" -    state: restarted -  # Ignore errrors since it is possible that type != simple for -  # pre-3.1.1 installations. -  ignore_errors: true -  when: openshift_master_ha | bool +# We retry the controllers because the API may not be 100% initialized yet. +- name: restart master controllers +  command: "systemctl restart {{ openshift_service_type }}-master-controllers" +  retries: 3 +  delay: 5 +  register: result +  until: result.rc == 0 diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml index 870ab7c57..aeff64983 100644 --- a/roles/openshift_master/tasks/systemd_units.yml +++ b/roles/openshift_master/tasks/systemd_units.yml @@ -1,6 +1,8 @@  ---  # systemd_units.yml is included both in the openshift_master role and in the upgrade  # playbooks. 
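The `restart.yml` change above (like the Nuage handlers earlier in this diff) moves the controllers restart from the `service` module to a plain `command` with `retries`/`until`, since the controllers can legitimately fail to start while the API is still initializing. A rough Python sketch of those retry semantics (the unit name is hypothetical, and Ansible's retry counting is approximated):

```
import subprocess
import time

def restart_with_retries(unit, retries=3, delay=5):
    # Keep retrying `systemctl restart` until it exits 0, the condition
    # the Ansible task expresses as `until: result.rc == 0`.
    for _ in range(retries):
        if subprocess.run(['systemctl', 'restart', unit]).returncode == 0:
            return
        time.sleep(delay)
    raise RuntimeError('{0} did not restart after {1} attempts'.format(unit, retries))

# restart_with_retries('origin-master-controllers')  # hypothetical unit name
```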
+- name: include push_via_dns.yml tasks +  include_tasks: push_via_dns.yml  - name: Set HA Service Info for containerized installs    set_fact: @@ -9,7 +11,8 @@    when:    - openshift_is_containerized | bool -- include_tasks: registry_auth.yml +- name: include registry_auth tasks +  include_tasks: registry_auth.yml  - name: Disable the legacy master service if it exists    systemd: diff --git a/roles/openshift_metrics/defaults/main.yaml b/roles/openshift_metrics/defaults/main.yaml index 8da74430f..293d8f451 100644 --- a/roles/openshift_metrics/defaults/main.yaml +++ b/roles/openshift_metrics/defaults/main.yaml @@ -54,7 +54,7 @@ openshift_metrics_master_url: https://kubernetes.default.svc  openshift_metrics_node_id: nodename  openshift_metrics_project: openshift-infra -openshift_metrics_cassandra_pvc_prefix: "{{ openshift_metrics_storage_volume_name | default('metrics-cassandra') }}" +openshift_metrics_cassandra_pvc_prefix: metrics-cassandra  openshift_metrics_cassandra_pvc_access: "{{ openshift_metrics_storage_access_modes | default(['ReadWriteOnce']) }}"  openshift_metrics_hawkular_user_write_access: False diff --git a/roles/openshift_metrics/tasks/generate_cassandra_pvcs.yaml b/roles/openshift_metrics/tasks/generate_cassandra_pvcs.yaml new file mode 100644 index 000000000..6aa48f9c3 --- /dev/null +++ b/roles/openshift_metrics/tasks/generate_cassandra_pvcs.yaml @@ -0,0 +1,46 @@ +--- +- name: Check to see if PVC already exists +  oc_obj: +    state: list +    kind: pvc +    name: "{{ openshift_metrics_cassandra_pvc_prefix }}-{{ metrics_pvc_index }}" +    namespace: "{{ openshift_metrics_project }}" +  register: _metrics_pvc + +# _metrics_pvc.results.results | length > 0 returns a false positive +# so we check for the presence of 'stderr' to determine if the obj exists or not +# the RC for existing and not existing is both 0 +- when: +    - _metrics_pvc.results.stderr is defined +  block: +    - name: generate hawkular-cassandra persistent volume claims +      template: +        src: pvc.j2 +        dest: "{{ mktemp.stdout }}/templates/hawkular-cassandra-pvc{{ metrics_pvc_index }}.yaml" +      vars: +        obj_name: "{{ openshift_metrics_cassandra_pvc_prefix }}-{{ metrics_pvc_index }}" +        labels: +          metrics-infra: hawkular-cassandra +        access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}" +        size: "{{ openshift_metrics_cassandra_pvc_size }}" +        pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}" +        storage_class_name: "{{ openshift_metrics_cassanda_pvc_storage_class_name | default('', true) }}" +      when: +        - openshift_metrics_cassandra_storage_type != 'emptydir' +        - openshift_metrics_cassandra_storage_type != 'dynamic' +      changed_when: false + +    - name: generate hawkular-cassandra persistent volume claims (dynamic) +      template: +        src: pvc.j2 +        dest: "{{ mktemp.stdout }}/templates/hawkular-cassandra-pvc{{ metrics_pvc_index }}.yaml" +      vars: +        obj_name: "{{ openshift_metrics_cassandra_pvc_prefix }}-{{ metrics_pvc_index }}" +        labels: +          metrics-infra: hawkular-cassandra +        access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}" +        size: "{{ openshift_metrics_cassandra_pvc_size }}" +        pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}" +        storage_class_name: "{{ openshift_metrics_cassanda_pvc_storage_class_name | default('', true) }}" +      when: openshift_metrics_cassandra_storage_type == 'dynamic' +      changed_when: 
false diff --git a/roles/openshift_metrics/tasks/install_cassandra.yaml b/roles/openshift_metrics/tasks/install_cassandra.yaml index 9026cc897..158e596ec 100644 --- a/roles/openshift_metrics/tasks/install_cassandra.yaml +++ b/roles/openshift_metrics/tasks/install_cassandra.yaml @@ -25,36 +25,7 @@  - set_fact: openshift_metrics_cassandra_pvc_prefix="hawkular-metrics"    when: "not openshift_metrics_cassandra_pvc_prefix or openshift_metrics_cassandra_pvc_prefix == ''" -- name: generate hawkular-cassandra persistent volume claims -  template: -    src: pvc.j2 -    dest: "{{ mktemp.stdout }}/templates/hawkular-cassandra-pvc{{ item }}.yaml" -  vars: -    obj_name: "{{ openshift_metrics_cassandra_pvc_prefix }}-{{ item }}" -    labels: -      metrics-infra: hawkular-cassandra -    access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}" -    size: "{{ openshift_metrics_cassandra_pvc_size }}" -    pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}" -    storage_class_name: "{{ openshift_metrics_cassanda_pvc_storage_class_name | default('', true) }}" -  with_sequence: count={{ openshift_metrics_cassandra_replicas }} -  when: -  - openshift_metrics_cassandra_storage_type != 'emptydir' -  - openshift_metrics_cassandra_storage_type != 'dynamic' -  changed_when: false - -- name: generate hawkular-cassandra persistent volume claims (dynamic) -  template: -    src: pvc.j2 -    dest: "{{ mktemp.stdout }}/templates/hawkular-cassandra-pvc{{ item }}.yaml" -  vars: -    obj_name: "{{ openshift_metrics_cassandra_pvc_prefix }}-{{ item }}" -    labels: -      metrics-infra: hawkular-cassandra -    access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}" -    size: "{{ openshift_metrics_cassandra_pvc_size }}" -    pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}" -    storage_class_name: "{{ openshift_metrics_cassanda_pvc_storage_class_name | default('', true) }}" +- include_tasks: generate_cassandra_pvcs.yaml    with_sequence: count={{ openshift_metrics_cassandra_replicas }} -  when: openshift_metrics_cassandra_storage_type == 'dynamic' -  changed_when: false +  loop_control: +    loop_var: metrics_pvc_index diff --git a/roles/openshift_metrics/tasks/install_metrics.yaml b/roles/openshift_metrics/tasks/install_metrics.yaml index 6b6c21d71..f05c8968d 100644 --- a/roles/openshift_metrics/tasks/install_metrics.yaml +++ b/roles/openshift_metrics/tasks/install_metrics.yaml @@ -79,7 +79,9 @@      console_config_edits:        - key: clusterInfo#metricsPublicURL          value: "https://{{ openshift_metrics_hawkular_hostname}}/hawkular/metrics" -  when: openshift_web_console_install | default(true) | bool +  when: +    - openshift_web_console_install | default(true) | bool +    - openshift.common.version_gte_3_9  - command: >      {{openshift_client_binary}} diff --git a/roles/openshift_metrics/tasks/uninstall_metrics.yaml b/roles/openshift_metrics/tasks/uninstall_metrics.yaml index 1664e9975..ed849916d 100644 --- a/roles/openshift_metrics/tasks/uninstall_metrics.yaml +++ b/roles/openshift_metrics/tasks/uninstall_metrics.yaml @@ -28,4 +28,6 @@      console_config_edits:        - key: clusterInfo#metricsPublicURL          value: "" -  when: openshift_web_console_install | default(true) | bool +  when: +    - openshift_web_console_install | default(true) | bool +    - openshift.common.version_gte_3_9 diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml index 0fe4c2035..9f887891b 100644 --- a/roles/openshift_node/defaults/main.yml +++ 
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index 0fe4c2035..9f887891b 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -137,6 +137,7 @@ default_r_openshift_node_image_prep_packages:
 - yum-utils
 # gluster
 - glusterfs-fuse
+- device-mapper-multipath
 # nfs
 - nfs-utils
 - flannel
diff --git a/roles/openshift_node/tasks/storage_plugins/iscsi.yml b/roles/openshift_node/tasks/storage_plugins/iscsi.yml
index a8048c42f..e31433dbc 100644
--- a/roles/openshift_node/tasks/storage_plugins/iscsi.yml
+++ b/roles/openshift_node/tasks/storage_plugins/iscsi.yml
@@ -1,6 +1,33 @@
 ---
 - name: Install iSCSI storage plugin dependencies
-  package: name=iscsi-initiator-utils state=present
+  package:
+    name: "{{ item }}"
+    state: present
   when: not openshift_is_atomic | bool
   register: result
   until: result is succeeded
+  with_items:
+    - iscsi-initiator-utils
+    - device-mapper-multipath
+
+- name: Start and enable multipath services
+  systemd:
+    name: "{{ item }}"
+    state: started
+    enabled: True
+  when: not openshift_is_atomic | bool
+  with_items:
+    - multipathd
+    - rpcbind
+
+- name: Template multipath configuration
+  template:
+    dest: "/etc/multipath.conf"
+    src: multipath.conf.j2
+    backup: true
+  when: not openshift_is_atomic | bool
+
+- name: Enable multipath
+  command: "mpathconf --enable"
+  when: not openshift_is_atomic | bool
diff --git a/roles/openshift_node/templates/multipath.conf.j2 b/roles/openshift_node/templates/multipath.conf.j2
new file mode 100644
index 000000000..8a0abc2c1
--- /dev/null
+++ b/roles/openshift_node/templates/multipath.conf.j2
@@ -0,0 +1,15 @@
+# LIO iSCSI
+# TODO: Add env variables for tweaking
+devices {
+        device {
+                vendor "LIO-ORG"
+                user_friendly_names "yes"
+                path_grouping_policy "failover"
+                path_selector "round-robin 0"
+                failback immediate
+                path_checker "tur"
+                prio "const"
+                no_path_retry 120
+                rr_weight "uniform"
+        }
+}
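After these tasks run, multipathd should be active with the templated `/etc/multipath.conf` in place. A small ad-hoc verification play (a sketch, not part of the role; the `nodes` inventory group is an assumption):

```
- hosts: nodes        # assumed inventory group
  become: true
  tasks:
    - name: Confirm multipathd is running
      command: systemctl is-active multipathd
      changed_when: false

    - name: List multipath devices known to multipathd
      command: multipath -ll
      register: mpath_out
      changed_when: false

    - debug:
        var: mpath_out.stdout_lines
```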
diff --git a/roles/openshift_openstack/defaults/main.yml b/roles/openshift_openstack/defaults/main.yml
index 77be1f2b1..2bdb81632 100644
--- a/roles/openshift_openstack/defaults/main.yml
+++ b/roles/openshift_openstack/defaults/main.yml
@@ -93,3 +93,8 @@ openshift_openstack_node_volume_size: "{{ openshift_openstack_docker_volume_size
 openshift_openstack_etcd_volume_size: 2
 openshift_openstack_lb_volume_size: 5
 openshift_openstack_ephemeral_volumes: false
+
+
+# cloud-config
+openshift_openstack_disable_root: true
+openshift_openstack_user: openshift
diff --git a/roles/openshift_openstack/templates/docker-storage-setup-dm.j2 b/roles/openshift_openstack/templates/docker-storage-setup-dm.j2
index 32c6b5838..9015c561f 100644
--- a/roles/openshift_openstack/templates/docker-storage-setup-dm.j2
+++ b/roles/openshift_openstack/templates/docker-storage-setup-dm.j2
@@ -1,4 +1,8 @@
+{% if docker_storage_mountpoints is defined %}
+DEVS="{{ docker_storage_mountpoints }}"
+{% else %}
 DEVS="{{ openshift_openstack_container_storage_setup.docker_dev }}"
+{% endif %}
 VG="{{ openshift_openstack_container_storage_setup.docker_vg }}"
 DATA_SIZE="{{ openshift_openstack_container_storage_setup.docker_data_size }}"
 EXTRA_DOCKER_STORAGE_OPTIONS="--storage-opt dm.basesize={{ openshift_openstack_container_storage_setup.docker_dm_basesize }}"
diff --git a/roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2 b/roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2
index 1bf366bdc..917347073 100644
--- a/roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2
+++ b/roles/openshift_openstack/templates/docker-storage-setup-overlayfs.j2
@@ -1,4 +1,8 @@
+{% if docker_storage_mountpoints is defined %}
+DEVS="{{ docker_storage_mountpoints }}"
+{% else %}
 DEVS="{{ openshift_openstack_container_storage_setup.docker_dev }}"
+{% endif %}
 VG="{{ openshift_openstack_container_storage_setup.docker_vg }}"
 DATA_SIZE="{{ openshift_openstack_container_storage_setup.docker_data_size }}"
 STORAGE_DRIVER=overlay2
diff --git a/roles/openshift_openstack/templates/heat_stack.yaml.j2 b/roles/openshift_openstack/templates/heat_stack.yaml.j2
index 8e7c6288a..1d3173022 100644
--- a/roles/openshift_openstack/templates/heat_stack.yaml.j2
+++ b/roles/openshift_openstack/templates/heat_stack.yaml.j2
@@ -418,6 +418,10 @@ resources:
           protocol: tcp
           port_range_min: 443
           port_range_max: 443
+        - direction: ingress
+          protocol: tcp
+          port_range_min: 1936
+          port_range_max: 1936
 
   cns-secgrp:
     type: OS::Neutron::SecurityGroup
diff --git a/roles/openshift_openstack/templates/heat_stack_server.yaml.j2 b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2
index 29b09f3c9..9aeecfa74 100644
--- a/roles/openshift_openstack/templates/heat_stack_server.yaml.j2
+++ b/roles/openshift_openstack/templates/heat_stack_server.yaml.j2
@@ -261,11 +261,12 @@ resources:
     properties:
       size: { get_param: volume_size }
      availability_zone: { get_param: availability_zone }
+      metadata:
+        purpose: openshift_docker_storage
 
   volume_attachment:
     type: OS::Cinder::VolumeAttachment
     properties:
       volume_id: { get_resource: cinder_volume }
       instance_uuid: { get_resource: server }
-      mountpoint: /dev/sdb
 {% endif %}
diff --git a/roles/openshift_openstack/templates/user_data.j2 b/roles/openshift_openstack/templates/user_data.j2
index eb65f7cec..ccaa5d464 100644
--- a/roles/openshift_openstack/templates/user_data.j2
+++ b/roles/openshift_openstack/templates/user_data.j2
@@ -1,9 +1,9 @@
 #cloud-config
-disable_root: true
+disable_root: {{ openshift_openstack_disable_root }}
 
 system_info:
   default_user:
-    name: openshift
+    name: {{ openshift_openstack_user }}
     sudo: ["ALL=(ALL) NOPASSWD: ALL"]
 
 write_files:
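Because `disable_root` and the default user are now templated, both can be overridden from the inventory. For example (illustrative values, not new defaults):

```
# group_vars sketch; values are examples
openshift_openstack_disable_root: false
openshift_openstack_user: cloud-user
```

This renders a `#cloud-config` whose `disable_root` is `false` and whose default user is `cloud-user`, with the same passwordless-sudo rule as before.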
diff --git a/roles/openshift_provisioners/defaults/main.yaml b/roles/openshift_provisioners/defaults/main.yaml
index a6f040831..34ba78404 100644
--- a/roles/openshift_provisioners/defaults/main.yaml
+++ b/roles/openshift_provisioners/defaults/main.yaml
@@ -1,7 +1,5 @@
 ---
 openshift_provisioners_install_provisioners: True
-openshift_provisioners_image_prefix: docker.io/openshift/origin-
-openshift_provisioners_image_version: latest
 
 openshift_provisioners_efs: False
 openshift_provisioners_efs_path: /persistentvolumes
@@ -10,3 +8,11 @@ openshift_provisioners_efs_nodeselector: ""
 openshift_provisioners_efs_supplementalgroup: '65534'
 
 openshift_provisioners_project: openshift-infra
+
+openshift_provisioners_image_prefix_dict:
+  origin: "docker.io/openshift/origin-"
+  openshift-enterprise: "registry.access.redhat.com/openshift3/ose-"
+
+openshift_provisioners_image_version_dict:
+  origin: "latest"
+  openshift-enterprise: "{{ openshift_image_tag }}"
diff --git a/roles/openshift_provisioners/tasks/main.yaml b/roles/openshift_provisioners/tasks/main.yaml
index 4ba26b2b8..d00573b07 100644
--- a/roles/openshift_provisioners/tasks/main.yaml
+++ b/roles/openshift_provisioners/tasks/main.yaml
@@ -12,6 +12,11 @@
   check_mode: no
   tags: provisioners_init
 
+- name: Set provisioners image facts
+  set_fact:
+    openshift_provisioners_image_prefix: "{{ openshift_provisioners_image_prefix | default(openshift_provisioners_image_prefix_dict[openshift_deployment_type]) }}"
+    openshift_provisioners_image_version: "{{ openshift_provisioners_image_version | default(openshift_provisioners_image_version_dict[openshift_deployment_type]) }}"
+
 - include_tasks: install_provisioners.yaml
   when: openshift_provisioners_install_provisioners | default(false) | bool
diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml
index 62d460272..08dfd8284 100644
--- a/roles/openshift_sanitize_inventory/tasks/main.yml
+++ b/roles/openshift_sanitize_inventory/tasks/main.yml
@@ -61,3 +61,17 @@
   when:
     - template_service_broker_remove | default(false) | bool
     - template_service_broker_install | default(true) | bool
+
+- name: Ensure that all required vSphere configuration variables are set
+  fail:
+    msg: >
+      When the vSphere cloud provider is configured you must define all of these variables:
+      openshift_cloudprovider_vsphere_username, openshift_cloudprovider_vsphere_password,
+      openshift_cloudprovider_vsphere_host, openshift_cloudprovider_vsphere_datacenter,
+      openshift_cloudprovider_vsphere_datastore
+  when:
+    - openshift_cloudprovider_kind is defined
+    - openshift_cloudprovider_kind == 'vsphere'
+    - ( openshift_cloudprovider_vsphere_username is undefined or openshift_cloudprovider_vsphere_password is undefined or
+        openshift_cloudprovider_vsphere_host is undefined or openshift_cloudprovider_vsphere_datacenter is undefined or
+        openshift_cloudprovider_vsphere_datastore is undefined )
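The new guard fails early when the vSphere cloud provider is selected but only partially configured. A complete configuration, expressed as group_vars YAML (every value below is a placeholder, not a shipped default):

```
openshift_cloudprovider_kind: vsphere
openshift_cloudprovider_vsphere_username: administrator@vsphere.local   # placeholder
openshift_cloudprovider_vsphere_password: "{{ vault_vsphere_password }}" # placeholder
openshift_cloudprovider_vsphere_host: vcenter.example.com               # placeholder
openshift_cloudprovider_vsphere_datacenter: dc1                         # placeholder
openshift_cloudprovider_vsphere_datastore: datastore1                   # placeholder
```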
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
index f7bd58db3..70a89b0ba 100644
--- a/roles/openshift_storage_glusterfs/README.md
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -73,49 +73,51 @@ Role Variables
 This role has the following variables that control the integration of a
 GlusterFS cluster into a new or existing OpenShift cluster:
 
-| Name                                             | Default value           | Description                             |
-|--------------------------------------------------|-------------------------|-----------------------------------------|
-| openshift_storage_glusterfs_timeout              | 300                     | Seconds to wait for pods to become ready
-| openshift_storage_glusterfs_namespace            | 'glusterfs'             | Namespace/project in which to create GlusterFS resources
-| openshift_storage_glusterfs_is_native            | True                    | GlusterFS should be containerized
-| openshift_storage_glusterfs_name                 | 'storage'               | A name to identify the GlusterFS cluster, which will be used in resource names
-| openshift_storage_glusterfs_nodeselector         | 'glusterfs=storage-host'| Selector to determine which nodes will host GlusterFS pods in native mode. **NOTE:** The label value is taken from the cluster name
-| openshift_storage_glusterfs_use_default_selector | False                   | Whether to use a default node selector for the GlusterFS namespace/project. If False, the namespace/project will have no restricting node selector. If True, uses pre-existing or default (e.g. osm_default_node_selector) node selectors. **NOTE:** If True, nodes which will host GlusterFS pods must already have the additional labels.
-| openshift_storage_glusterfs_storageclass         | True                    | Automatically create a StorageClass for each GlusterFS cluster
-| openshift_storage_glusterfs_storageclass_default | False                   | Sets the StorageClass for each GlusterFS cluster as default
-| openshift_storage_glusterfs_image                | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7'
-| openshift_storage_glusterfs_version              | 'latest'                | Container image version to use for GlusterFS pods
-| openshift_storage_glusterfs_block_deploy         | True                    | Deploy glusterblock provisioner service
-| openshift_storage_glusterfs_block_image          | 'gluster/glusterblock-provisioner'| Container image to use for glusterblock-provisioner pod, enterprise default is 'rhgs3/rhgs-gluster-block-prov-rhel7'
-| openshift_storage_glusterfs_block_version        | 'latest'                | Container image version to use for glusterblock-provisioner pod
-| openshift_storage_glusterfs_block_host_vol_create| True                    | Automatically create GlusterFS volumes to host glusterblock volumes. **NOTE:** If this is False, block-hosting volumes will need to be manually created before glusterblock volumes can be provisioned
-| openshift_storage_glusterfs_block_host_vol_size  | 100                     | Size, in GB, of GlusterFS volumes that will be automatically create to host glusterblock volumes if not enough space is available for a glusterblock volume create request. **NOTE:** This value is effectively an upper limit on the size of glusterblock volumes unless you manually create larger GlusterFS block-hosting volumes
-| openshift_storage_glusterfs_block_host_vol_max   | 15                      | Max number of GlusterFS volumes to host glusterblock volumes
-| openshift_storage_glusterfs_s3_deploy            | True                    | Deploy gluster-s3 service
-| openshift_storage_glusterfs_s3_image             | 'gluster/gluster-object'| Container image to use for gluster-s3 pod, enterprise default is 'rhgs3/rhgs-gluster-s3-server-rhel7'
-| openshift_storage_glusterfs_s3_version           | 'latest'                | Container image version to use for gluster=s3 pod
-| openshift_storage_glusterfs_s3_account           | Undefined               | S3 account name for the S3 service, required for S3 service deployment
-| openshift_storage_glusterfs_s3_user              | Undefined               | S3 user name for the S3 service, required for S3 service deployment
-| openshift_storage_glusterfs_s3_password          | Undefined               | S3 user password for the S3 service, required for S3 service deployment
-| openshift_storage_glusterfs_s3_pvc               | Dynamic                 | Name of the GlusterFS-backed PVC which will be used for S3 object data storage, generated from the cluster name and S3 account by default
-| openshift_storage_glusterfs_s3_pvc_size          | "2Gi"                   | Size, in Gi, of the GlusterFS-backed PVC which will be used for S3 object data storage
-| openshift_storage_glusterfs_s3_meta_pvc          | Dynamic                 | Name of the GlusterFS-backed PVC which will be used for S3 object metadata storage, generated from the cluster name and S3 account by default
-| openshift_storage_glusterfs_s3_meta_pvc_size     | "1Gi"                   | Size, in Gi, of the GlusterFS-backed PVC which will be used for S3 object metadata storage
-| openshift_storage_glusterfs_wipe                 | False                   | Destroy any existing GlusterFS resources and wipe storage devices. **WARNING: THIS WILL DESTROY ANY DATA ON THOSE DEVICES.**
-| openshift_storage_glusterfs_heketi_is_native     | True                    | heketi should be containerized
-| openshift_storage_glusterfs_heketi_cli           | 'heketi-cli'            | Command/Path to invoke the heketi-cli tool **NOTE:** Change this only for **non-native heketi** if heketi-cli is not in the global `$PATH` of the machine running openshift-ansible
-| openshift_storage_glusterfs_heketi_image         | 'heketi/heketi'         | Container image to use for heketi pods, enterprise default is 'rhgs3/rhgs-volmanager-rhel7'
-| openshift_storage_glusterfs_heketi_version       | 'latest'                | Container image version to use for heketi pods
-| openshift_storage_glusterfs_heketi_admin_key     | auto-generated          | String to use as secret key for performing heketi commands as admin
-| openshift_storage_glusterfs_heketi_user_key      | auto-generated          | String to use as secret key for performing heketi commands as user that can only view or modify volumes
-| openshift_storage_glusterfs_heketi_topology_load | True                    | Load the GlusterFS topology information into heketi
-| openshift_storage_glusterfs_heketi_url           | Undefined               | When heketi is native, this sets the hostname portion of the final heketi route URL. When heketi is external, this is the FQDN or IP address to the heketi service.
-| openshift_storage_glusterfs_heketi_port          | 8080                    | TCP port for external heketi service **NOTE:** This has no effect in native mode
-| openshift_storage_glusterfs_heketi_executor      | 'kubernetes'            | Selects how a native heketi service will manage GlusterFS nodes: 'kubernetes' for native nodes, 'ssh' for external nodes
-| openshift_storage_glusterfs_heketi_ssh_port      | 22                      | SSH port for external GlusterFS nodes via native heketi
-| openshift_storage_glusterfs_heketi_ssh_user      | 'root'                  | SSH user for external GlusterFS nodes via native heketi
-| openshift_storage_glusterfs_heketi_ssh_sudo      | False                   | Whether to sudo (if non-root user) for SSH to external GlusterFS nodes via native heketi
-| openshift_storage_glusterfs_heketi_ssh_keyfile   | Undefined               | Path to a private key file for use with SSH connections to external GlusterFS nodes via native heketi **NOTE:** This must be an absolute path
+| Name                                                   | Default value           | Description                             |
+|--------------------------------------------------------|-------------------------|-----------------------------------------|
+| openshift_storage_glusterfs_timeout                    | 300                     | Seconds to wait for pods to become ready
+| openshift_storage_glusterfs_namespace                  | 'glusterfs'             | Namespace/project in which to create GlusterFS resources
+| openshift_storage_glusterfs_is_native                  | True                    | GlusterFS should be containerized
+| openshift_storage_glusterfs_name                       | 'storage'               | A name to identify the GlusterFS cluster, which will be used in resource names
+| openshift_storage_glusterfs_nodeselector               | 'glusterfs=storage-host'| Selector to determine which nodes will host GlusterFS pods in native mode. **NOTE:** The label value is taken from the cluster name
+| openshift_storage_glusterfs_use_default_selector       | False                   | Whether to use a default node selector for the GlusterFS namespace/project. If False, the namespace/project will have no restricting node selector. If True, uses pre-existing or default (e.g. osm_default_node_selector) node selectors. **NOTE:** If True, nodes which will host GlusterFS pods must already have the additional labels.
+| openshift_storage_glusterfs_storageclass               | True                    | Automatically create a StorageClass for each GlusterFS cluster
+| openshift_storage_glusterfs_storageclass_default       | False                   | Sets the StorageClass for each GlusterFS cluster as default
+| openshift_storage_glusterfs_image                      | 'gluster/gluster-centos'| Container image to use for GlusterFS pods, enterprise default is 'rhgs3/rhgs-server-rhel7'
+| openshift_storage_glusterfs_version                    | 'latest'                | Container image version to use for GlusterFS pods
+| openshift_storage_glusterfs_block_deploy               | True                    | Deploy glusterblock provisioner service
+| openshift_storage_glusterfs_block_image                | 'gluster/glusterblock-provisioner'| Container image to use for glusterblock-provisioner pod, enterprise default is 'rhgs3/rhgs-gluster-block-prov-rhel7'
+| openshift_storage_glusterfs_block_version              | 'latest'                | Container image version to use for glusterblock-provisioner pod
+| openshift_storage_glusterfs_block_host_vol_create      | True                    | Automatically create GlusterFS volumes to host glusterblock volumes. **NOTE:** If this is False, block-hosting volumes will need to be manually created before glusterblock volumes can be provisioned
+| openshift_storage_glusterfs_block_host_vol_size        | 100                     | Size, in GB, of GlusterFS volumes that will be automatically created to host glusterblock volumes if not enough space is available for a glusterblock volume create request. **NOTE:** This value is effectively an upper limit on the size of glusterblock volumes unless you manually create larger GlusterFS block-hosting volumes
+| openshift_storage_glusterfs_block_host_vol_max         | 15                      | Max number of GlusterFS volumes to host glusterblock volumes
+| openshift_storage_glusterfs_block_storageclass         | False                   | Automatically create a StorageClass for each Gluster Block cluster
+| openshift_storage_glusterfs_block_storageclass_default | False                   | Sets the StorageClass for each Gluster Block cluster as default
+| openshift_storage_glusterfs_s3_deploy                  | True                    | Deploy gluster-s3 service
+| openshift_storage_glusterfs_s3_image                   | 'gluster/gluster-object'| Container image to use for gluster-s3 pod, enterprise default is 'rhgs3/rhgs-gluster-s3-server-rhel7'
+| openshift_storage_glusterfs_s3_version                 | 'latest'                | Container image version to use for gluster-s3 pod
+| openshift_storage_glusterfs_s3_account                 | Undefined               | S3 account name for the S3 service, required for S3 service deployment
+| openshift_storage_glusterfs_s3_user                    | Undefined               | S3 user name for the S3 service, required for S3 service deployment
+| openshift_storage_glusterfs_s3_password                | Undefined               | S3 user password for the S3 service, required for S3 service deployment
+| openshift_storage_glusterfs_s3_pvc                     | Dynamic                 | Name of the GlusterFS-backed PVC which will be used for S3 object data storage, generated from the cluster name and S3 account by default
+| openshift_storage_glusterfs_s3_pvc_size                | "2Gi"                   | Size, in Gi, of the GlusterFS-backed PVC which will be used for S3 object data storage
+| openshift_storage_glusterfs_s3_meta_pvc                | Dynamic                 | Name of the GlusterFS-backed PVC which will be used for S3 object metadata storage, generated from the cluster name and S3 account by default
+| openshift_storage_glusterfs_s3_meta_pvc_size           | "1Gi"                   | Size, in Gi, of the GlusterFS-backed PVC which will be used for S3 object metadata storage
+| openshift_storage_glusterfs_wipe                       | False                   | Destroy any existing GlusterFS resources and wipe storage devices. **WARNING: THIS WILL DESTROY ANY DATA ON THOSE DEVICES.**
+| openshift_storage_glusterfs_heketi_is_native           | True                    | heketi should be containerized
+| openshift_storage_glusterfs_heketi_cli                 | 'heketi-cli'            | Command/Path to invoke the heketi-cli tool **NOTE:** Change this only for **non-native heketi** if heketi-cli is not in the global `$PATH` of the machine running openshift-ansible
+| openshift_storage_glusterfs_heketi_image               | 'heketi/heketi'         | Container image to use for heketi pods, enterprise default is 'rhgs3/rhgs-volmanager-rhel7'
+| openshift_storage_glusterfs_heketi_version             | 'latest'                | Container image version to use for heketi pods
+| openshift_storage_glusterfs_heketi_admin_key           | auto-generated          | String to use as secret key for performing heketi commands as admin
+| openshift_storage_glusterfs_heketi_user_key            | auto-generated          | String to use as secret key for performing heketi commands as a user that can only view or modify volumes
+| openshift_storage_glusterfs_heketi_topology_load       | True                    | Load the GlusterFS topology information into heketi
+| openshift_storage_glusterfs_heketi_url                 | Undefined               | When heketi is native, this sets the hostname portion of the final heketi route URL. When heketi is external, this is the FQDN or IP address to the heketi service.
+| openshift_storage_glusterfs_heketi_port                | 8080                    | TCP port for external heketi service **NOTE:** This has no effect in native mode
+| openshift_storage_glusterfs_heketi_executor            | 'kubernetes'            | Selects how a native heketi service will manage GlusterFS nodes: 'kubernetes' for native nodes, 'ssh' for external nodes
+| openshift_storage_glusterfs_heketi_ssh_port            | 22                      | SSH port for external GlusterFS nodes via native heketi
+| openshift_storage_glusterfs_heketi_ssh_user            | 'root'                  | SSH user for external GlusterFS nodes via native heketi
+| openshift_storage_glusterfs_heketi_ssh_sudo            | False                   | Whether to sudo (if non-root user) for SSH to external GlusterFS nodes via native heketi
+| openshift_storage_glusterfs_heketi_ssh_keyfile         | Undefined               | Path to a private key file for use with SSH connections to external GlusterFS nodes via native heketi **NOTE:** This must be an absolute path
 | openshift_storage_glusterfs_heketi_fstab         | '/var/lib/heketi/fstab' | When heketi is native, sets the path to the fstab file on the GlusterFS nodes to update on LVM volume mounts, changes to '/etc/fstab/' when the heketi executor is 'ssh' **NOTE:** This should not need to be changed
 | openshift_storage_glusterfs_heketi_wipe          | False                   | Destroy any existing heketi resources, defaults to the value of `openshift_storage_glusterfs_wipe`
@@ -126,14 +128,16 @@ registry. These variables start with the prefix
 values in their corresponding non-registry variables. The following variables
 are an exception:
 
-| Name                                                      | Default value         | Description                             |
-|-----------------------------------------------------------|-----------------------|-----------------------------------------|
-| openshift_storage_glusterfs_registry_namespace            | registry namespace    | Default is to use the hosted registry's namespace, otherwise 'glusterfs'
-| openshift_storage_glusterfs_registry_name                 | 'registry'            | This allows for the logical separation of the registry GlusterFS cluster from other GlusterFS clusters
-| openshift_storage_glusterfs_registry_storageclass         | False                 | It is recommended to not create a StorageClass for GlusterFS clusters serving registry storage, so as to avoid performance penalties
-| openshift_storage_glusterfs_registry_storageclass_default | False                 | Sets the StorageClass for each GlusterFS cluster as default
-| openshift_storage_glusterfs_registry_heketi_admin_key     | auto-generated        | Separate from the above
-| openshift_storage_glusterfs_registry_heketi_user_key      | auto-generated        | Separate from the above
+| Name                                                            | Default value         | Description                             |
+|-----------------------------------------------------------------|-----------------------|-----------------------------------------|
+| openshift_storage_glusterfs_registry_namespace                  | registry namespace    | Default is to use the hosted registry's namespace, otherwise 'glusterfs'
+| openshift_storage_glusterfs_registry_name                       | 'registry'            | This allows for the logical separation of the registry GlusterFS cluster from other GlusterFS clusters
+| openshift_storage_glusterfs_registry_storageclass               | False                 | It is recommended to not create a StorageClass for GlusterFS clusters serving registry storage, so as to avoid performance penalties
+| openshift_storage_glusterfs_registry_storageclass_default       | False                 | Sets the StorageClass for each GlusterFS cluster as default
+| openshift_storage_glusterfs_registry_block_storageclass         | False                 | It is recommended to not create a StorageClass for Gluster Block clusters serving registry storage, so as to avoid performance penalties
+| openshift_storage_glusterfs_registry_block_storageclass_default | False                 | Sets the StorageClass for each Gluster Block cluster as default
+| openshift_storage_glusterfs_registry_heketi_admin_key           | auto-generated        | Separate from the above
+| openshift_storage_glusterfs_registry_heketi_user_key            | auto-generated        | Separate from the above
 
 Additionally, this role's behavior responds to several registry-specific variables in the [openshift_hosted role](../openshift_hosted/README.md):
diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml
index 4cbe262d2..7e751cc7a 100644
--- a/roles/openshift_storage_glusterfs/defaults/main.yml
+++ b/roles/openshift_storage_glusterfs/defaults/main.yml
@@ -14,6 +14,8 @@ openshift_storage_glusterfs_block_version: 'latest'
 openshift_storage_glusterfs_block_host_vol_create: True
 openshift_storage_glusterfs_block_host_vol_size: 100
 openshift_storage_glusterfs_block_host_vol_max: 15
+openshift_storage_glusterfs_block_storageclass: False
+openshift_storage_glusterfs_block_storageclass_default: False
 openshift_storage_glusterfs_s3_deploy: True
 openshift_storage_glusterfs_s3_image: "{{ 'rhgs3/rhgs-gluster-s3-server-rhel7' | quote if openshift_deployment_type == 'openshift-enterprise' else 'gluster/gluster-object' | quote }}"
 openshift_storage_glusterfs_s3_version: 'latest'
@@ -61,6 +63,8 @@ openshift_storage_glusterfs_registry_block_version: "{{ openshift_storage_gluste
 openshift_storage_glusterfs_registry_block_host_vol_create: "{{ openshift_storage_glusterfs_block_host_vol_create }}"
 openshift_storage_glusterfs_registry_block_host_vol_size: "{{ openshift_storage_glusterfs_block_host_vol_size }}"
 openshift_storage_glusterfs_registry_block_host_vol_max: "{{ openshift_storage_glusterfs_block_host_vol_max }}"
+openshift_storage_glusterfs_registry_block_storageclass: False
+openshift_storage_glusterfs_registry_block_storageclass_default: False
 openshift_storage_glusterfs_registry_s3_deploy: "{{ openshift_storage_glusterfs_s3_deploy }}"
 openshift_storage_glusterfs_registry_s3_image: "{{ openshift_storage_glusterfs_s3_image }}"
 openshift_storage_glusterfs_registry_s3_version: "{{ openshift_storage_glusterfs_s3_version }}"
@@ -103,3 +107,9 @@ r_openshift_storage_glusterfs_os_firewall_allow:
   port: "24008/tcp"
 - service: glusterfs_bricks
   port: "49152-49251/tcp"
+- service: glusterblockd
+  port: "24010/tcp"
+- service: iscsi-targets
+  port: "3260/tcp"
+- service: rpcbind
+  port: "111/tcp"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
index 001578406..e6e261b52 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml
@@ -313,7 +313,36 @@
   - glusterfs_storageclass or glusterfs_s3_deploy
 
 - include_tasks: glusterblock_deploy.yml
-  when: glusterfs_block_deploy
+  when:
+  - glusterfs_block_deploy
+  #TODO: Remove this when multipathd is available on atomic
+  - not openshift_is_atomic | bool
+
+- block:
+  - name: Create heketi block secret
+    oc_secret:
+      namespace: "{{ glusterfs_namespace }}"
+      state: present
+      name: "heketi-{{ glusterfs_name }}-admin-secret-block"
+      type: "gluster.org/glusterblock"
+      force: True
+      contents:
+      - path: key
+        data: "{{ glusterfs_heketi_admin_key }}"
+    when: glusterfs_heketi_admin_key is defined
+  - name: Generate Gluster Block StorageClass file
+    template:
+      src: "{{ openshift.common.examples_content_version }}/gluster-block-storageclass.yml.j2"
+      dest: "{{ mktemp.stdout }}/gluster-block-storageclass.yml"
+
+  - name: Create Gluster Block StorageClass
+    oc_obj:
+      state: present
+      kind: storageclass
+      name: "glusterfs-{{ glusterfs_name }}-block"
+      files:
+      - "{{ mktemp.stdout }}/gluster-block-storageclass.yml"
+  when: glusterfs_block_storageclass
 
 - include_tasks: gluster_s3_deploy.yml
   when: glusterfs_s3_deploy
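When `glusterfs_heketi_admin_key` is set, the `oc_secret` task above produces a secret that the glusterblock provisioner later consumes through the StorageClass `restsecretname`/`restsecretnamespace` parameters. In plain Kubernetes YAML the created object looks roughly like this (names assume the default 'storage' cluster in the 'glusterfs' namespace):

```
apiVersion: v1
kind: Secret
metadata:
  name: heketi-storage-admin-secret-block   # heketi-{{ glusterfs_name }}-admin-secret-block
  namespace: glusterfs                      # {{ glusterfs_namespace }}
type: gluster.org/glusterblock
data:
  key: <base64-encoded heketi admin key>
```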
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
index a374df0ce..92de1b64d 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_config.yml
@@ -17,6 +17,8 @@
     glusterfs_block_host_vol_create: "{{ openshift_storage_glusterfs_block_host_vol_create }}"
     glusterfs_block_host_vol_size: "{{ openshift_storage_glusterfs_block_host_vol_size }}"
     glusterfs_block_host_vol_max: "{{ openshift_storage_glusterfs_block_host_vol_max }}"
+    glusterfs_block_storageclass: "{{ openshift_storage_glusterfs_block_storageclass | bool }}"
+    glusterfs_block_storageclass_default: "{{ openshift_storage_glusterfs_block_storageclass_default | bool }}"
     glusterfs_s3_deploy: "{{ openshift_storage_glusterfs_s3_deploy | bool }}"
     glusterfs_s3_image: "{{ openshift_storage_glusterfs_s3_image }}"
     glusterfs_s3_version: "{{ openshift_storage_glusterfs_s3_version }}"
diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
index 544a6f491..10c29fd37 100644
--- a/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
+++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_registry.yml
@@ -17,6 +17,8 @@
     glusterfs_block_host_vol_create: "{{ openshift_storage_glusterfs_registry_block_host_vol_create }}"
     glusterfs_block_host_vol_size: "{{ openshift_storage_glusterfs_registry_block_host_vol_size }}"
     glusterfs_block_host_vol_max: "{{ openshift_storage_glusterfs_registry_block_host_vol_max }}"
+    glusterfs_block_storageclass: "{{ openshift_storage_glusterfs_registry_block_storageclass | bool }}"
+    glusterfs_block_storageclass_default: "{{ openshift_storage_glusterfs_registry_block_storageclass_default | bool }}"
     glusterfs_s3_deploy: "{{ openshift_storage_glusterfs_registry_s3_deploy | bool }}"
     glusterfs_s3_image: "{{ openshift_storage_glusterfs_registry_s3_image }}"
     glusterfs_s3_version: "{{ openshift_storage_glusterfs_registry_s3_version }}"
@@ -46,7 +48,7 @@
     glusterfs_heketi_ssh_sudo: "{{ openshift_storage_glusterfs_registry_heketi_ssh_sudo | bool }}"
     glusterfs_heketi_ssh_keyfile: "{{ openshift_storage_glusterfs_registry_heketi_ssh_keyfile }}"
     glusterfs_heketi_fstab: "{{ openshift_storage_glusterfs_registry_heketi_fstab }}"
-    glusterfs_nodes: "{% if groups.glusterfs_registry is defined %}{% set nodes = groups.glusterfs_registry %}{% elif 'groups.glusterfs' is defined %}{% set nodes = groups.glusterfs %}{% else %}{% set nodes = '[]' %}{% endif %}{{ nodes }}"
+    glusterfs_nodes: "{% if groups.glusterfs_registry is defined and groups['glusterfs_registry'] | length > 0 %}{% set nodes = groups.glusterfs_registry %}{% elif groups.glusterfs is defined and groups['glusterfs'] | length > 0 %}{% set nodes = groups.glusterfs %}{% else %}{% set nodes = '[]' %}{% endif %}{{ nodes }}"
 
 - include_tasks: glusterfs_common.yml
   when:
diff --git a/roles/openshift_storage_glusterfs/templates/glusterfs.conf b/roles/openshift_storage_glusterfs/templates/glusterfs.conf
index dd4d6e6f7..bcc02e217 100644
--- a/roles/openshift_storage_glusterfs/templates/glusterfs.conf
+++ b/roles/openshift_storage_glusterfs/templates/glusterfs.conf
@@ -1,4 +1,7 @@
 #{{ ansible_managed }}
 dm_thin_pool
 dm_snapshot
-dm_mirror
\ No newline at end of file
+dm_mirror
+#glusterblock
+dm_multipath
+target_core_user
diff --git a/roles/openshift_storage_glusterfs/templates/v3.7/gluster-block-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.7/gluster-block-storageclass.yml.j2
new file mode 100644
index 000000000..02ed8fa8d
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.7/gluster-block-storageclass.yml.j2
@@ -0,0 +1,19 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: glusterfs-{{ glusterfs_name }}-block
+{% if glusterfs_block_storageclass_default is defined and glusterfs_block_storageclass_default %}
+  annotations:
+    storageclass.kubernetes.io/is-default-class: "true"
+{% endif %}
+provisioner: gluster.org/glusterblock
+parameters:
+  resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
+  restuser: "admin"
+  chapauthenabled: "true"
+  hacount: "3"
+{% if glusterfs_heketi_admin_key is defined %}
+  restsecretnamespace: "{{ glusterfs_namespace }}"
+  restsecretname: "heketi-{{ glusterfs_name }}-admin-secret-block"
+{%- endif -%}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.8/gluster-block-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.8/gluster-block-storageclass.yml.j2
new file mode 100644
index 000000000..02ed8fa8d
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.8/gluster-block-storageclass.yml.j2
@@ -0,0 +1,19 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: glusterfs-{{ glusterfs_name }}-block
+{% if glusterfs_block_storageclass_default is defined and glusterfs_block_storageclass_default %}
+  annotations:
+    storageclass.kubernetes.io/is-default-class: "true"
+{% endif %}
+provisioner: gluster.org/glusterblock
+parameters:
+  resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
+  restuser: "admin"
+  chapauthenabled: "true"
+  hacount: "3"
+{% if glusterfs_heketi_admin_key is defined %}
+  restsecretnamespace: "{{ glusterfs_namespace }}"
+  restsecretname: "heketi-{{ glusterfs_name }}-admin-secret-block"
+{%- endif -%}
diff --git a/roles/openshift_storage_glusterfs/templates/v3.9/gluster-block-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v3.9/gluster-block-storageclass.yml.j2
new file mode 100644
index 000000000..02ed8fa8d
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v3.9/gluster-block-storageclass.yml.j2
@@ -0,0 +1,19 @@
+---
+apiVersion: storage.k8s.io/v1
+kind: StorageClass
+metadata:
+  name: glusterfs-{{ glusterfs_name }}-block
+{% if glusterfs_block_storageclass_default is defined and glusterfs_block_storageclass_default %}
+  annotations:
+    storageclass.kubernetes.io/is-default-class: "true"
+{% endif %}
+provisioner: gluster.org/glusterblock
+parameters:
+  resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
+  restuser: "admin"
+  chapauthenabled: "true"
+  hacount: "3"
+{% if glusterfs_heketi_admin_key is defined %}
+  restsecretnamespace: "{{ glusterfs_namespace }}"
+  restsecretname: "heketi-{{ glusterfs_name }}-admin-secret-block"
+{%- endif -%}
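For reference, with a cluster named 'storage', native heketi, and an admin key defined, the three identical templates above render approximately to the following (the route hostname is environment-specific and shown here as a placeholder):

```
apiVersion: storage.k8s.io/v1
kind: StorageClass
metadata:
  name: glusterfs-storage-block
provisioner: gluster.org/glusterblock
parameters:
  resturl: "http://heketi-storage.example.com"   # glusterfs_heketi_route, placeholder
  restuser: "admin"
  chapauthenabled: "true"
  hacount: "3"
  restsecretnamespace: "glusterfs"
  restsecretname: "heketi-storage-admin-secret-block"
```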
diff --git a/roles/openshift_version/tasks/first_master.yml b/roles/openshift_version/tasks/first_master.yml
index e01a56dc1..b0d155c2c 100644
--- a/roles/openshift_version/tasks/first_master.yml
+++ b/roles/openshift_version/tasks/first_master.yml
@@ -19,7 +19,7 @@
 - set_fact:
     openshift_pkg_version: -{{ openshift_version }}
   when:
-  - openshift_pkg_version is not defined
+  - openshift_pkg_version is not defined or openshift_pkg_version == ""
   - openshift_upgrade_target is not defined
 
 - block:
@@ -28,5 +28,5 @@
   - set_fact:
       openshift_image_tag: v{{ openshift_version }}
   when: >
-    openshift_image_tag is not defined
+    openshift_image_tag is not defined or openshift_image_tag == ""
     or l_force_image_tag_to_version | bool
diff --git a/roles/openshift_version/tasks/first_master_containerized_version.yml b/roles/openshift_version/tasks/first_master_containerized_version.yml
index 3ed1d2cfe..9eb38cb2b 100644
--- a/roles/openshift_version/tasks/first_master_containerized_version.yml
+++ b/roles/openshift_version/tasks/first_master_containerized_version.yml
@@ -6,6 +6,7 @@
     openshift_version: "{{ openshift_image_tag[1:].split('-')[0] if openshift_image_tag != 'latest' else openshift_image_tag }}"
   when:
  - openshift_image_tag is defined
+  - openshift_image_tag != ""
   - openshift_version is not defined
   - not (openshift_version_reinit | default(false))
diff --git a/roles/openshift_version/tasks/first_master_rpm_version.yml b/roles/openshift_version/tasks/first_master_rpm_version.yml
index 5d92f90c6..85e440513 100644
--- a/roles/openshift_version/tasks/first_master_rpm_version.yml
+++ b/roles/openshift_version/tasks/first_master_rpm_version.yml
@@ -5,6 +5,7 @@
     openshift_version: "{{ openshift_pkg_version[1:].split('-')[0] }}"
   when:
   - openshift_pkg_version is defined
+  - openshift_pkg_version != ""
   - openshift_version is not defined
   - not (openshift_version_reinit | default(false))
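The added `== ""` checks close a gap where an inventory line such as `openshift_pkg_version=` leaves the variable defined but empty, so `is not defined` alone used to skip the defaulting logic. A condensed illustration of the corrected condition:

```
# Sketch: an empty string is still "defined", so both checks are needed
# before falling back to the detected openshift_version.
- set_fact:
    openshift_pkg_version: "-{{ openshift_version }}"
  when:
    - openshift_pkg_version is not defined or openshift_pkg_version == ""
    - openshift_upgrade_target is not defined
```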
