diff options
Diffstat (limited to 'roles')
240 files changed, 2318 insertions, 814 deletions
diff --git a/roles/ansible_service_broker/meta/main.yml b/roles/ansible_service_broker/meta/main.yml index ec4aafb79..65b736500 100644 --- a/roles/ansible_service_broker/meta/main.yml +++ b/roles/ansible_service_broker/meta/main.yml @@ -12,4 +12,5 @@ galaxy_info:    categories:    - cloud  dependencies: +- role: lib_utils  - role: lib_openshift diff --git a/roles/ansible_service_broker/tasks/install.yml b/roles/ansible_service_broker/tasks/install.yml index 4ca47d074..ba2f7293b 100644 --- a/roles/ansible_service_broker/tasks/install.yml +++ b/roles/ansible_service_broker/tasks/install.yml @@ -4,7 +4,7 @@  - name: Set default image variables based on deployment type    include_vars: "{{ item }}"    with_first_found: -    - "{{ openshift_deployment_type | default(deployment_type) }}.yml" +    - "{{ openshift_deployment_type }}.yml"      - "default_images.yml"  - name: set ansible_service_broker facts diff --git a/roles/calico/meta/main.yml b/roles/calico/meta/main.yml index 816c81369..e3997911b 100644 --- a/roles/calico/meta/main.yml +++ b/roles/calico/meta/main.yml @@ -13,5 +13,6 @@ galaxy_info:    - cloud    - system  dependencies: +- role: lib_utils  - role: openshift_facts  - role: openshift_master_facts diff --git a/roles/calico_master/meta/main.yml b/roles/calico_master/meta/main.yml index 4d70c79cf..73c94db4e 100644 --- a/roles/calico_master/meta/main.yml +++ b/roles/calico_master/meta/main.yml @@ -13,5 +13,6 @@ galaxy_info:    - cloud    - system  dependencies: +- role: lib_utils  - role: calico  - role: openshift_facts diff --git a/roles/calico_master/tasks/main.yml b/roles/calico_master/tasks/main.yml index 16d960d8b..05415a4d6 100644 --- a/roles/calico_master/tasks/main.yml +++ b/roles/calico_master/tasks/main.yml @@ -19,7 +19,7 @@  - name: Calico Master | Launch Calico Policy Controller    command: > -    {{ openshift.common.client_binary }} create +    {{ openshift_client_binary }} create      -f {{ mktemp.stdout }}/calico-policy-controller.yml      
--config={{ openshift.common.config_base }}/master/admin.kubeconfig    register: calico_create_output diff --git a/roles/cockpit-ui/meta/main.yml b/roles/cockpit-ui/meta/main.yml index 4d619fff6..372c29c28 100644 --- a/roles/cockpit-ui/meta/main.yml +++ b/roles/cockpit-ui/meta/main.yml @@ -12,4 +12,6 @@ galaxy_info:    categories:    - cloud  dependencies: +- role: lib_utils  - role: lib_openshift +- role: openshift_facts diff --git a/roles/cockpit-ui/tasks/main.yml b/roles/cockpit-ui/tasks/main.yml index f60912033..d4174d879 100644 --- a/roles/cockpit-ui/tasks/main.yml +++ b/roles/cockpit-ui/tasks/main.yml @@ -39,7 +39,7 @@    - name: Deploy registry-console      command: > -      {{ openshift.common.client_binary }} new-app --template=registry-console +      {{ openshift_client_binary }} new-app --template=registry-console        {% if openshift_cockpit_deployer_prefix is defined  %}-p IMAGE_PREFIX="{{ openshift_cockpit_deployer_prefix }}"{% endif %}        {% if openshift_cockpit_deployer_basename is defined  %}-p IMAGE_BASENAME="{{ openshift_cockpit_deployer_basename }}"{% endif %}        {% if openshift_cockpit_deployer_version is defined  %}-p IMAGE_VERSION="{{ openshift_cockpit_deployer_version }}"{% endif %} diff --git a/roles/cockpit/meta/main.yml b/roles/cockpit/meta/main.yml index 8c0ed3cb8..07e466f04 100644 --- a/roles/cockpit/meta/main.yml +++ b/roles/cockpit/meta/main.yml @@ -12,4 +12,4 @@ galaxy_info:    categories:    - cloud  dependencies: -- role: lib_os_firewall +- role: lib_utils diff --git a/roles/cockpit/tasks/main.yml b/roles/cockpit/tasks/main.yml index fc13afed3..577cd7daf 100644 --- a/roles/cockpit/tasks/main.yml +++ b/roles/cockpit/tasks/main.yml @@ -10,7 +10,7 @@      - cockpit-bridge      - cockpit-docker      - "{{ cockpit_plugins }}" -  when: not openshift.common.is_containerized | bool +  when: not openshift_is_containerized | bool    register: result    until: result is succeeded @@ -19,4 +19,4 @@      name: cockpit.socket      
enabled: true      state: started -  when: not openshift.common.is_containerized | bool +  when: not openshift_is_containerized | bool diff --git a/roles/container_runtime/defaults/main.yml b/roles/container_runtime/defaults/main.yml index dd185cb38..f4e249792 100644 --- a/roles/container_runtime/defaults/main.yml +++ b/roles/container_runtime/defaults/main.yml @@ -2,8 +2,6 @@  docker_cli_auth_config_path: '/root/.docker'  openshift_docker_signature_verification: False -repoquery_cmd: "{{ 'dnf repoquery --latest-limit 1 -d 0' if ansible_pkg_mgr == 'dnf' else 'repoquery --plugins' }}" -  openshift_docker_alternative_creds: False  # oreg_url is defined by user input. @@ -55,11 +53,25 @@ openshift_docker_is_node_or_master: "{{ True if inventory_hostname in (groups['o  docker_alt_storage_path: /var/lib/containers/docker  docker_default_storage_path: /var/lib/docker +docker_storage_path: "{{ docker_default_storage_path }}" +docker_storage_size: 40G +docker_storage_setup_options: +  vg: docker_vg +  data_size: 99%VG +  storage_driver: overlay2 +  root_lv_name: docker-root-lv +  root_lv_size: 100%FREE +  root_lv_mount_path: "{{ docker_storage_path }}" +docker_storage_extra_options: +- "--storage-opt overlay2.override_kernel_check=true" +- "--storage-opt overlay2.size={{ docker_storage_size }}" +- "--graph={{ docker_storage_path}}" +  # Set local versions of facts that must be in json format for container-daemon.json  # NOTE: When jinja2.9+ is used the container-daemon.json file can move to using tojson  l_docker_log_options: "{{ l2_docker_log_options | to_json }}" -l_docker_log_options_dict: "{{ l2_docker_log_options | oo_list_to_dict | to_json }}" +l_docker_log_options_dict: "{{ l2_docker_log_options | lib_utils_oo_list_to_dict | to_json }}"  l_docker_additional_registries: "{{ l2_docker_additional_registries | to_json }}"  l_docker_blocked_registries: "{{ l2_docker_blocked_registries | to_json }}"  l_docker_insecure_registries: "{{ l2_docker_insecure_registries | 
to_json }}" diff --git a/roles/container_runtime/meta/main.yml b/roles/container_runtime/meta/main.yml index 02fceb745..3bc2607fb 100644 --- a/roles/container_runtime/meta/main.yml +++ b/roles/container_runtime/meta/main.yml @@ -11,5 +11,5 @@ galaxy_info:      - 7  dependencies:  - role: lib_openshift -- role: lib_os_firewall  - role: lib_utils +- role: openshift_facts diff --git a/roles/container_runtime/tasks/common/syscontainer_packages.yml b/roles/container_runtime/tasks/common/syscontainer_packages.yml index b41122880..d429047e6 100644 --- a/roles/container_runtime/tasks/common/syscontainer_packages.yml +++ b/roles/container_runtime/tasks/common/syscontainer_packages.yml @@ -4,7 +4,7 @@    package:      name: container-selinux      state: present -  when: not openshift.common.is_atomic | bool +  when: not openshift_is_atomic | bool    register: result    until: result is succeeded @@ -13,7 +13,7 @@    package:      name: atomic      state: present -  when: not openshift.common.is_atomic | bool +  when: not openshift_is_atomic | bool    register: result    until: result is succeeded @@ -23,6 +23,6 @@    package:      name: runc      state: present -  when: not openshift.common.is_atomic | bool +  when: not openshift_is_atomic | bool    register: result    until: result is succeeded diff --git a/roles/container_runtime/tasks/docker_storage_setup_overlay.yml b/roles/container_runtime/tasks/docker_storage_setup_overlay.yml new file mode 100644 index 000000000..782c002e3 --- /dev/null +++ b/roles/container_runtime/tasks/docker_storage_setup_overlay.yml @@ -0,0 +1,10 @@ +--- +- name: Setup the docker-storage for overlay +  template: +    src: docker_storage_setup.j2 +    dest: /etc/sysconfig/docker-storage-setup +    owner: root +    group: root +    mode: 0664 +  when: +  - container_runtime_docker_storage_type == 'overlay2' diff --git a/roles/container_runtime/tasks/docker_upgrade_check.yml b/roles/container_runtime/tasks/docker_upgrade_check.yml index 
6731963dd..7831f4c7d 100644 --- a/roles/container_runtime/tasks/docker_upgrade_check.yml +++ b/roles/container_runtime/tasks/docker_upgrade_check.yml @@ -61,14 +61,14 @@  - name: Determine available Docker    shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"    register: g_atomic_docker_version_result -  when: openshift.common.is_atomic | bool +  when: openshift_is_atomic | bool  - set_fact:      l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}" -  when: openshift.common.is_atomic | bool +  when: openshift_is_atomic | bool  - fail:      msg: This playbook requires access to Docker 1.12 or later    when: -  - openshift.common.is_atomic | bool +  - openshift_is_atomic | bool    - l_docker_version.avail_version | default(l_docker_version.curr_version, true) is version_compare('1.12','<') diff --git a/roles/container_runtime/tasks/package_docker.yml b/roles/container_runtime/tasks/package_docker.yml index d9d4037dd..d6e7e7fed 100644 --- a/roles/container_runtime/tasks/package_docker.yml +++ b/roles/container_runtime/tasks/package_docker.yml @@ -3,7 +3,7 @@  - name: Get current installed Docker version    command: "{{ repoquery_installed }} --qf '%{version}' docker" -  when: not openshift.common.is_atomic | bool +  when: not openshift_is_atomic | bool    register: curr_docker_version    retries: 4    until: curr_docker_version is succeeded @@ -20,7 +20,7 @@      name: "docker{{ '-' + docker_version if docker_version is defined else '' }}"      state: present    when: -  - not (openshift.common.is_atomic | bool) +  - not (openshift_is_atomic | bool)    - not (curr_docker_version is skipped)    - not (curr_docker_version.stdout != '')    register: result @@ -48,7 +48,7 @@    lineinfile:      dest: /etc/sysconfig/docker      regexp: '^{{ item.reg_conf_var }}=.*$' -    line: "{{ item.reg_conf_var }}='{{ item.reg_fact_val | oo_prepend_strings_in_list(item.reg_flag ~ ' ') | join(' ') }}'" +    line: "{{ 
item.reg_conf_var }}='{{ item.reg_fact_val | lib_utils_oo_prepend_strings_in_list(item.reg_flag ~ ' ') | join(' ') }}'"    when:    - item.reg_fact_val != []    - docker_check.stat.isreg is defined @@ -101,7 +101,7 @@      line: "OPTIONS='\        {% if ansible_selinux.status | default(None) == 'enabled' and openshift_docker_selinux_enabled | default(true) | bool %} --selinux-enabled {% endif %} \        {% if openshift_docker_log_driver | bool %} --log-driver {{ openshift_docker_log_driver }}{% endif %} \ -      {% if l2_docker_log_options != [] %} {{ l2_docker_log_options |  oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %} \ +      {% if l2_docker_log_options != [] %} {{ l2_docker_log_options |  lib_utils_oo_split() | lib_utils_oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %} \        {% if openshift_docker_hosted_registry_insecure and (openshift_docker_hosted_registry_network | bool) %} --insecure-registry={{ openshift_docker_hosted_registry_network }} {% endif %} \        {% if docker_options is defined %} {{ docker_options }}{% endif %} \        {% if openshift_docker_options %} {{ openshift_docker_options }}{% endif %} \ diff --git a/roles/container_runtime/tasks/systemcontainer_crio.yml b/roles/container_runtime/tasks/systemcontainer_crio.yml index 61f122f3c..6a195a938 100644 --- a/roles/container_runtime/tasks/systemcontainer_crio.yml +++ b/roles/container_runtime/tasks/systemcontainer_crio.yml @@ -3,7 +3,7 @@  - name: Check we are not using node as a Docker container with CRI-O    fail: msg='Cannot use CRI-O with node configured as a Docker container'    when: -    - openshift.common.is_containerized | bool +    - openshift_is_containerized | bool      - not l_is_node_system_container | bool  - include_tasks: common/pre.yml diff --git a/roles/container_runtime/tasks/systemcontainer_docker.yml b/roles/container_runtime/tasks/systemcontainer_docker.yml index 639585367..dc0452553 100644 --- 
a/roles/container_runtime/tasks/systemcontainer_docker.yml +++ b/roles/container_runtime/tasks/systemcontainer_docker.yml @@ -18,7 +18,7 @@  # Make sure Docker is installed so we are able to use the client  - name: Install Docker so we can use the client    package: name=docker{{ '-' + docker_version if docker_version is defined else '' }} state=present -  when: not openshift.common.is_atomic | bool +  when: not openshift_is_atomic | bool    register: result    until: result is succeeded diff --git a/roles/container_runtime/templates/docker_storage_setup.j2 b/roles/container_runtime/templates/docker_storage_setup.j2 new file mode 100644 index 000000000..b056087e0 --- /dev/null +++ b/roles/container_runtime/templates/docker_storage_setup.j2 @@ -0,0 +1,12 @@ +# Edit this file to override any configuration options specified in +# /usr/lib/docker-storage-setup/docker-storage-setup. +# +# For more details refer to "man docker-storage-setup" +DEVS={{ container_runtime_docker_storage_setup_device }} +VG={{ docker_storage_setup_options.vg }} +DATA_SIZE={{ docker_storage_setup_options.data_size }} +STORAGE_DRIVER="{{ docker_storage_setup_options.storage_driver }}" +CONTAINER_ROOT_LV_NAME="{{ docker_storage_setup_options.root_lv_name }}" +CONTAINER_ROOT_LV_SIZE="{{ docker_storage_setup_options.root_lv_size }}" +CONTAINER_ROOT_LV_MOUNT_PATH="{{ docker_storage_setup_options.root_lv_mount_path }}" +EXTRA_STORAGE_OPTIONS="{{ docker_storage_extra_options | join(' ') }}" diff --git a/roles/contiv/defaults/main.yml b/roles/contiv/defaults/main.yml index aa976d921..8d06a5e96 100644 --- a/roles/contiv/defaults/main.yml +++ b/roles/contiv/defaults/main.yml @@ -101,7 +101,6 @@ apic_epg_bridge_domain: not_specified  apic_configure_default_policy: false  apic_default_external_contract: "uni/tn-common/brc-default"  apic_default_app_profile: "contiv-infra-app-profile" -is_atomic: False  kube_cert_dir: "/data/src/github.com/openshift/origin/openshift.local.config/master"  master_name: "{{ 
groups['masters'][0] }}"  contiv_etcd_port: 22379 diff --git a/roles/contiv/meta/main.yml b/roles/contiv/meta/main.yml index 52b9d09dd..67fb23db8 100644 --- a/roles/contiv/meta/main.yml +++ b/roles/contiv/meta/main.yml @@ -13,6 +13,7 @@ galaxy_info:    - cloud    - system  dependencies: +- role: lib_utils  - role: contiv_facts  - role: etcd    etcd_service: contiv-etcd diff --git a/roles/contiv/tasks/packageManagerInstall.yml b/roles/contiv/tasks/packageManagerInstall.yml index d5726476c..3367844a8 100644 --- a/roles/contiv/tasks/packageManagerInstall.yml +++ b/roles/contiv/tasks/packageManagerInstall.yml @@ -5,7 +5,7 @@  - include_tasks: pkgMgrInstallers/centos-install.yml    when: (ansible_os_family == "RedHat") and -        not is_atomic +        not openshift_is_atomic  - name: Package Manager | Set fact saying we did CentOS package install    set_fact: diff --git a/roles/contiv_facts/tasks/main.yml b/roles/contiv_facts/tasks/main.yml index 3267a4ab0..ced04759d 100644 --- a/roles/contiv_facts/tasks/main.yml +++ b/roles/contiv_facts/tasks/main.yml @@ -1,19 +1,4 @@  --- -- name: Determine if Atomic -  stat: path=/run/ostree-booted -  register: s -  changed_when: false -  check_mode: no - -- name: Init the is_atomic fact -  set_fact: -    is_atomic: false - -- name: Set the is_atomic fact -  set_fact: -    is_atomic: true -  when: s.stat.exists -  - name: Determine if CoreOS    raw: "grep '^NAME=' /etc/os-release | sed s'/NAME=//'"    register: distro @@ -85,4 +70,4 @@    when: has_rpm  - include_tasks: fedora-install.yml -  when: not is_atomic and ansible_distribution == "Fedora" +  when: not openshift_is_atomic and ansible_distribution == "Fedora" diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml index 86cea5c46..337727e47 100644 --- a/roles/etcd/defaults/main.yaml +++ b/roles/etcd/defaults/main.yaml @@ -5,7 +5,7 @@ r_etcd_common_backup_sufix_name: ''  l_is_etcd_system_container: "{{ (openshift_use_etcd_system_container | 
default(openshift_use_system_containers | default(false)) | bool) }}"  # runc, docker, host -r_etcd_common_etcd_runtime: "{{ 'runc' if l_is_etcd_system_container else 'docker' if l_is_containerized else 'host' }}" +r_etcd_common_etcd_runtime: "{{ 'runc' if l_is_etcd_system_container else 'docker' if openshift_is_containerized else 'host' }}"  r_etcd_common_embedded_etcd: false  osm_etcd_image: 'registry.access.redhat.com/rhel7/etcd' diff --git a/roles/etcd/meta/main.yml b/roles/etcd/meta/main.yml index f2e1fc310..af58eff62 100644 --- a/roles/etcd/meta/main.yml +++ b/roles/etcd/meta/main.yml @@ -17,6 +17,5 @@ galaxy_info:    - system  dependencies:  - role: lib_openshift -- role: lib_os_firewall  - role: lib_utils  - role: openshift_facts diff --git a/roles/etcd/tasks/auxiliary/drop_etcdctl.yml b/roles/etcd/tasks/auxiliary/drop_etcdctl.yml index ccfd9da14..881a8c270 100644 --- a/roles/etcd/tasks/auxiliary/drop_etcdctl.yml +++ b/roles/etcd/tasks/auxiliary/drop_etcdctl.yml @@ -1,7 +1,7 @@  ---  - name: Install etcd for etcdctl    package: name=etcd{{ '-' + etcd_version if etcd_version is defined else '' }} state=present -  when: not openshift.common.is_atomic | bool +  when: not openshift_is_atomic | bool    register: result    until: result is succeeded diff --git a/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml b/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml index 119071a72..d4518554c 100644 --- a/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml +++ b/roles/etcd/tasks/certificates/fetch_client_certificates_from_ca.yml @@ -28,7 +28,7 @@      etcd_client_certs_missing: "{{ true if etcd_certificates_redeploy | default(false) | bool                                     else (False in (g_external_etcd_cert_stat_result.results                                                     | default({}) -                                                   | oo_collect(attribute='stat.exists') +                                
                   | lib_utils_oo_collect(attribute='stat.exists')                                                     | list)) }}"  - name: Ensure generated_certs directory present diff --git a/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml index deb2301d7..59a6b6590 100644 --- a/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml +++ b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml @@ -21,7 +21,7 @@      etcd_server_certs_missing: "{{ true if etcd_certificates_redeploy | default(false) | bool                                     else (False in (g_etcd_server_cert_stat_result.results                                                     | default({}) -                                                   | oo_collect(attribute='stat.exists') +                                                   | lib_utils_oo_collect(attribute='stat.exists')                                                     | list)) }}"  - name: Ensure generated_certs directory present diff --git a/roles/etcd/tasks/migration/add_ttls.yml b/roles/etcd/tasks/migration/add_ttls.yml index a4b0ff31d..3d945344c 100644 --- a/roles/etcd/tasks/migration/add_ttls.yml +++ b/roles/etcd/tasks/migration/add_ttls.yml @@ -11,7 +11,7 @@  - name: Re-introduce leases (as a replacement for key TTLs)    command: > -    {{ openshift.common.client_binary }} adm migrate etcd-ttl \ +    {{ openshift_client_binary }} adm migrate etcd-ttl \      --cert {{ r_etcd_common_master_peer_cert_file }} \      --key {{ r_etcd_common_master_peer_key_file }} \      --cacert {{ r_etcd_common_master_peer_ca_file }} \ diff --git a/roles/etcd/tasks/migration/migrate.yml b/roles/etcd/tasks/migration/migrate.yml index 54a9c74ff..847b1d722 100644 --- a/roles/etcd/tasks/migration/migrate.yml +++ b/roles/etcd/tasks/migration/migrate.yml @@ -1,7 +1,7 @@  ---  # Should this be run in a serial manner?  
- set_fact: -    l_etcd_service: "{{ 'etcd_container' if openshift.common.is_containerized else 'etcd' }}" +    l_etcd_service: "{{ 'etcd_container' if openshift_is_containerized else 'etcd' }}"  - name: Migrate etcd data    command: > diff --git a/roles/etcd/tasks/version_detect.yml b/roles/etcd/tasks/version_detect.yml index fe1e418d8..ab3626cec 100644 --- a/roles/etcd/tasks/version_detect.yml +++ b/roles/etcd/tasks/version_detect.yml @@ -12,7 +12,7 @@    - debug:        msg: "Etcd rpm version {{ etcd_rpm_version.stdout }} detected"    when: -  - not openshift.common.is_containerized | bool +  - not openshift_is_containerized | bool  - block:    - name: Record containerized etcd version (docker) @@ -52,4 +52,4 @@    - debug:        msg: "Etcd containerized version {{ etcd_container_version }} detected"    when: -  - openshift.common.is_containerized | bool +  - openshift_is_containerized | bool diff --git a/roles/flannel/meta/main.yml b/roles/flannel/meta/main.yml index 51128dba6..7634b8192 100644 --- a/roles/flannel/meta/main.yml +++ b/roles/flannel/meta/main.yml @@ -12,4 +12,5 @@ galaxy_info:    categories:    - cloud    - system -dependencies: [] +dependencies: +- role: lib_utils diff --git a/roles/flannel/tasks/main.yml b/roles/flannel/tasks/main.yml index 9b9250f31..4627bf69c 100644 --- a/roles/flannel/tasks/main.yml +++ b/roles/flannel/tasks/main.yml @@ -2,7 +2,7 @@  - name: Install flannel    become: yes    package: name=flannel state=present -  when: not openshift.common.is_atomic | bool +  when: not openshift_is_atomic | bool    register: result    until: result is succeeded diff --git a/roles/flannel_register/meta/main.yml b/roles/flannel_register/meta/main.yml index 73bddcca4..1e44ff5ba 100644 --- a/roles/flannel_register/meta/main.yml +++ b/roles/flannel_register/meta/main.yml @@ -13,4 +13,5 @@ galaxy_info:    - cloud    - system  dependencies: -- { role: openshift_facts } +- role: openshift_facts +- role: lib_utils diff --git 
a/roles/kuryr/meta/main.yml b/roles/kuryr/meta/main.yml index 7fd5adf41..7eb8ed781 100644 --- a/roles/kuryr/meta/main.yml +++ b/roles/kuryr/meta/main.yml @@ -13,5 +13,6 @@ galaxy_info:    - cloud    - system  dependencies: -- { role: lib_openshift } -- { role: openshift_facts } +- role: lib_utils +- role: lib_openshift +- role: openshift_facts diff --git a/roles/kuryr/templates/controller-deployment.yaml.j2 b/roles/kuryr/templates/controller-deployment.yaml.j2 index d970270b5..155d1faab 100644 --- a/roles/kuryr/templates/controller-deployment.yaml.j2 +++ b/roles/kuryr/templates/controller-deployment.yaml.j2 @@ -22,6 +22,13 @@ spec:        - image: kuryr/controller:latest          imagePullPolicy: IfNotPresent          name: controller +{% if kuryr_openstack_enable_pools | default(false) %} +        readinessProbe: +          exec: +            command: +            - cat +            - /tmp/pools_loaded +{% endif %}          terminationMessagePath: "/dev/termination-log"          # FIXME(dulek): This shouldn't be required, but without it selinux is          #               complaining about access to kuryr.conf. diff --git a/roles/lib_os_firewall/README.md b/roles/lib_os_firewall/README.md deleted file mode 100644 index ba8c84865..000000000 --- a/roles/lib_os_firewall/README.md +++ /dev/null @@ -1,63 +0,0 @@ -lib_os_firewall -=========== - -lib_os_firewall manages iptables firewall settings for a minimal use -case (Adding/Removing rules based on protocol and port number). 
- -Note: firewalld is not supported on Atomic Host -https://bugzilla.redhat.com/show_bug.cgi?id=1403331 - -Requirements ------------- - -Ansible 2.2 - -Role Variables --------------- - -| Name                      | Default |                                        | -|---------------------------|---------|----------------------------------------| -| os_firewall_allow         | []      | List of service,port mappings to allow | -| os_firewall_deny          | []      | List of service, port mappings to deny | - -Dependencies ------------- - -None. - -Example Playbook ----------------- - -Use iptables and open tcp ports 80 and 443: -``` ---- -- hosts: servers -  vars: -    os_firewall_use_firewalld: false -    os_firewall_allow: -    - service: httpd -      port: 80/tcp -    - service: https -      port: 443/tcp -  tasks: -  - include_role: -      name: lib_os_firewall - -  - name: set allow rules -    os_firewall_manage_iptables: -      name: "{{ item.service }}" -      action: add -      protocol: "{{ item.port.split('/')[1] }}" -      port: "{{ item.port.split('/')[0] }}" -    with_items: "{{ os_firewall_allow }}" -``` - - -License -------- - -Apache License, Version 2.0 - -Author Information ------------------- -Jason DeTiberus - jdetiber@redhat.com diff --git a/roles/lib_utils/action_plugins/sanity_checks.py b/roles/lib_utils/action_plugins/sanity_checks.py new file mode 100644 index 000000000..1bf332678 --- /dev/null +++ b/roles/lib_utils/action_plugins/sanity_checks.py @@ -0,0 +1,126 @@ +""" +Ansible action plugin to ensure inventory variables are set +appropriately and no conflicting options have been provided. +""" +from ansible.plugins.action import ActionBase +from ansible import errors + +# Valid values for openshift_deployment_type +VALID_DEPLOYMENT_TYPES = ('origin', 'openshift-enterprise') + +# Tuple of variable names and default values if undefined. 
+NET_PLUGIN_LIST = (('openshift_use_openshift_sdn', True), +                   ('openshift_use_flannel', False), +                   ('openshift_use_nuage', False), +                   ('openshift_use_contiv', False), +                   ('openshift_use_calico', False)) + + +def to_bool(var_to_check): +    """Determine a boolean value given the multiple +       ways bools can be specified in ansible.""" +    # http://yaml.org/type/bool.html +    yes_list = (True, 1, "True", "1", "true", "TRUE", +                "Yes", "yes", "Y", "y", "YES", +                "on", "ON", "On") +    return var_to_check in yes_list + + +class ActionModule(ActionBase): +    """Action plugin to execute sanity checks.""" +    def template_var(self, hostvars, host, varname): +        """Retrieve a variable from hostvars and template it. +           If undefined, return None type.""" +        res = hostvars[host].get(varname) +        if res is None: +            return None +        return self._templar.template(res) + +    def check_openshift_deployment_type(self, hostvars, host): +        """Ensure a valid openshift_deployment_type is set""" +        openshift_deployment_type = self.template_var(hostvars, host, +                                                      'openshift_deployment_type') +        if openshift_deployment_type not in VALID_DEPLOYMENT_TYPES: +            type_strings = ", ".join(VALID_DEPLOYMENT_TYPES) +            msg = "openshift_deployment_type must be defined and one of {}".format(type_strings) +            raise errors.AnsibleModuleError(msg) + +    def check_python_version(self, hostvars, host, distro): +        """Ensure python version is 3 for Fedora and python 2 for others""" +        ansible_python = self.template_var(hostvars, host, 'ansible_python') +        if distro == "Fedora": +            if ansible_python['version']['major'] != 3: +                msg = "openshift-ansible requires Python 3 for {};".format(distro) +                msg += " For 
information on enabling Python 3 with Ansible," +                msg += " see https://docs.ansible.com/ansible/python_3_support.html" +                raise errors.AnsibleModuleError(msg) +        else: +            if ansible_python['version']['major'] != 2: +                msg = "openshift-ansible requires Python 2 for {};".format(distro) + +    def network_plugin_check(self, hostvars, host): +        """Ensure only one type of network plugin is enabled""" +        res = [] +        # Loop through each possible network plugin boolean, determine the +        # actual boolean value, and append results into a list. +        for plugin, default_val in NET_PLUGIN_LIST: +            res_temp = self.template_var(hostvars, host, plugin) +            if res_temp is None: +                res_temp = default_val +            res.append(to_bool(res_temp)) + +        if sum(res) != 1: +            plugin_str = list(zip([x[0] for x in NET_PLUGIN_LIST], res)) + +            msg = "Host Checked: {} Only one of must be true. 
Found: {}".format(host, plugin_str) +            raise errors.AnsibleModuleError(msg) + +    def check_hostname_vars(self, hostvars, host): +        """Checks to ensure openshift_hostname +           and openshift_public_hostname +           conform to the proper length of 63 characters or less""" +        for varname in ('openshift_public_hostname', 'openshift_hostname'): +            var_value = self.template_var(hostvars, host, varname) +            if var_value and len(var_value) > 63: +                msg = '{} must be 63 characters or less'.format(varname) +                raise errors.AnsibleModuleError(msg) + +    def run_checks(self, hostvars, host): +        """Execute the hostvars validations against host""" +        distro = self.template_var(hostvars, host, 'ansible_distribution') +        self.check_openshift_deployment_type(hostvars, host) +        self.check_python_version(hostvars, host, distro) +        self.network_plugin_check(hostvars, host) +        self.check_hostname_vars(hostvars, host) + +    def run(self, tmp=None, task_vars=None): +        result = super(ActionModule, self).run(tmp, task_vars) + +        # self.task_vars holds all in-scope variables. +        # Ignore settting self.task_vars outside of init. +        # pylint: disable=W0201 +        self.task_vars = task_vars or {} + +        # self._task.args holds task parameters. +        # check_hosts is a parameter to this plugin, and should provide +        # a list of hosts. 
+        check_hosts = self._task.args.get('check_hosts') +        if not check_hosts: +            msg = "check_hosts is required" +            raise errors.AnsibleModuleError(msg) + +        # We need to access each host's variables +        hostvars = self.task_vars.get('hostvars') +        if not hostvars: +            msg = hostvars +            raise errors.AnsibleModuleError(msg) + +        # We loop through each host in the provided list check_hosts +        for host in check_hosts: +            self.run_checks(hostvars, host) + +        result["changed"] = False +        result["failed"] = False +        result["msg"] = "Sanity Checks passed" + +        return result diff --git a/roles/lib_utils/callback_plugins/aa_version_requirement.py b/roles/lib_utils/callback_plugins/aa_version_requirement.py new file mode 100644 index 000000000..1093acdae --- /dev/null +++ b/roles/lib_utils/callback_plugins/aa_version_requirement.py @@ -0,0 +1,60 @@ +#!/usr/bin/python + +""" +This callback plugin verifies the required minimum version of Ansible +is installed for proper operation of the OpenShift Ansible Installer. +The plugin is named with leading `aa_` to ensure this plugin is loaded +first (alphanumerically) by Ansible. 
+""" +import sys +from ansible import __version__ + +if __version__ < '2.0': +    # pylint: disable=import-error,no-name-in-module +    # Disabled because pylint warns when Ansible v2 is installed +    from ansible.callbacks import display as pre2_display +    CallbackBase = object + +    def display(*args, **kwargs): +        """Set up display function for pre Ansible v2""" +        pre2_display(*args, **kwargs) +else: +    from ansible.plugins.callback import CallbackBase +    from ansible.utils.display import Display + +    def display(*args, **kwargs): +        """Set up display function for Ansible v2""" +        display_instance = Display() +        display_instance.display(*args, **kwargs) + + +# Set to minimum required Ansible version +REQUIRED_VERSION = '2.4.1.0' +DESCRIPTION = "Supported versions: %s or newer" % REQUIRED_VERSION + + +def version_requirement(version): +    """Test for minimum required version""" +    return version >= REQUIRED_VERSION + + +class CallbackModule(CallbackBase): +    """ +    Ansible callback plugin +    """ + +    CALLBACK_VERSION = 1.0 +    CALLBACK_NAME = 'version_requirement' + +    def __init__(self): +        """ +        Version verification is performed in __init__ to catch the +        requirement early in the execution of Ansible and fail gracefully +        """ +        super(CallbackModule, self).__init__() + +        if not version_requirement(__version__): +            display( +                'FATAL: Current Ansible version (%s) is not supported. 
%s' +                % (__version__, DESCRIPTION), color='red') +            sys.exit(1) diff --git a/roles/lib_utils/callback_plugins/openshift_quick_installer.py b/roles/lib_utils/callback_plugins/openshift_quick_installer.py new file mode 100644 index 000000000..c0fdbc650 --- /dev/null +++ b/roles/lib_utils/callback_plugins/openshift_quick_installer.py @@ -0,0 +1,360 @@ +# pylint: disable=invalid-name,protected-access,import-error,line-too-long,attribute-defined-outside-init + +# This program is free software: you can redistribute it and/or modify +# it under the terms of the GNU General Public License as published by +# the Free Software Foundation, either version 3 of the License, or +# (at your option) any later version. +# +# This program is distributed in the hope that it will be useful, +# but WITHOUT ANY WARRANTY; without even the implied warranty of +# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE.  See the +# GNU General Public License for more details. +# +# You should have received a copy of the GNU General Public License +# along with this program.  If not, see <http://www.gnu.org/licenses/>. + +"""This file is a stdout callback plugin for the OpenShift Quick +Installer. The purpose of this callback plugin is to reduce the amount +of produced output for customers and enable simpler progress checking. + +What's different: + +* Playbook progress is expressed as: Play <current_play>/<total_plays> (Play Name) +  Ex: Play 3/30 (Initialize Megafrobber) + +* The Tasks and Handlers in each play (and included roles) are printed +  as a series of .'s following the play progress line. + +* Many of these methods include copy and paste code from the upstream +  default.py callback. We do that to give us control over the stdout +  output while allowing Ansible to handle the file logging +  normally. 
The biggest changes here are that we are manually setting
+  `log_only` to True in the Display.display method and we redefine the
+  Display.banner method locally so we can set log_only on that call as
+  well.
+
+"""
+
+from __future__ import (absolute_import, print_function)
+import sys
+from ansible import constants as C
+from ansible.plugins.callback import CallbackBase
+from ansible.utils.color import colorize, hostcolor
+
+
+class CallbackModule(CallbackBase):
+
+    """
+    Ansible callback plugin
+    """
+    CALLBACK_VERSION = 2.2
+    CALLBACK_TYPE = 'stdout'
+    CALLBACK_NAME = 'openshift_quick_installer'
+    CALLBACK_NEEDS_WHITELIST = False
+    plays_count = 0
+    plays_total_ran = 0
+
+    def __init__(self):
+        """Constructor, ensure standard self.*s are set"""
+        self._play = None
+        self._last_task_banner = None
+        super(CallbackModule, self).__init__()
+
+    def banner(self, msg, color=None):
+        '''Prints a header-looking line with stars taking up to 80 columns
+        of width (3 columns, minimum)
+
+        Overrides the upstream banner method so that display is called
+        with log_only=True
+        '''
+        msg = msg.strip()
+        star_len = (79 - len(msg))
+        if star_len < 0:
+            star_len = 3
+        stars = "*" * star_len
+        self._display.display("\n%s %s" % (msg, stars), color=color, log_only=True)
+
+    def _print_task_banner(self, task):
+        """Imported from the upstream 'default' callback"""
+        # args can be specified as no_log in several places: in the task or in
+        # the argument spec.  We can check whether the task is no_log but the
+        # argument spec can't be because that is only run on the target
+        # machine and we haven't run it there yet at this time.
+        # +        # So we give people a config option to affect display of the args so +        # that they can secure this if they feel that their stdout is insecure +        # (shoulder surfing, logging stdout straight to a file, etc). +        args = '' +        if not task.no_log and C.DISPLAY_ARGS_TO_STDOUT: +            args = ', '.join('%s=%s' % a for a in task.args.items()) +            args = ' %s' % args + +        self.banner(u"TASK [%s%s]" % (task.get_name().strip(), args)) +        if self._display.verbosity >= 2: +            path = task.get_path() +            if path: +                self._display.display(u"task path: %s" % path, color=C.COLOR_DEBUG, log_only=True) + +        self._last_task_banner = task._uuid + +    def v2_playbook_on_start(self, playbook): +        """This is basically the start of it all""" +        self.plays_count = len(playbook.get_plays()) +        self.plays_total_ran = 0 + +        if self._display.verbosity > 1: +            from os.path import basename +            self.banner("PLAYBOOK: %s" % basename(playbook._file_name)) + +    def v2_playbook_on_play_start(self, play): +        """Each play calls this once before running any tasks + +We could print the number of tasks here as well by using +`play.get_tasks()` but that is not accurate when a play includes a +role. Only the tasks directly assigned to a play are exposed in the +`play` object. +        """ +        self.plays_total_ran += 1 +        print("") +        print("Play %s/%s (%s)" % (self.plays_total_ran, self.plays_count, play.get_name())) + +        name = play.get_name().strip() +        if not name: +            msg = "PLAY" +        else: +            msg = "PLAY [%s]" % name + +        self._play = play + +        self.banner(msg) + +    # pylint: disable=unused-argument,no-self-use +    def v2_playbook_on_task_start(self, task, is_conditional): +        """This prints out the task header. 
For example:
+
+TASK [openshift_facts : Ensure PyYaml is installed] ***...
+
+Rather than print out all that for every task, we print a dot
+character to indicate a task has been started.
+        """
+        sys.stdout.write('.')
+
+        args = ''
+        # args can be specified as no_log in several places: in the task or in
+        # the argument spec.  We can check whether the task is no_log but the
+        # argument spec can't be because that is only run on the target
+        # machine and we haven't run it there yet at this time.
+        #
+        # So we give people a config option to affect display of the args so
+        # that they can secure this if they feel that their stdout is insecure
+        # (shoulder surfing, logging stdout straight to a file, etc).
+        if not task.no_log and C.DISPLAY_ARGS_TO_STDOUT:
+            args = ', '.join(('%s=%s' % a for a in task.args.items()))
+            args = ' %s' % args
+        self.banner("TASK [%s%s]" % (task.get_name().strip(), args))
+        if self._display.verbosity >= 2:
+            path = task.get_path()
+            if path:
+                self._display.display("task path: %s" % path, color=C.COLOR_DEBUG, log_only=True)
+
+    # pylint: disable=unused-argument,no-self-use
+    def v2_playbook_on_handler_task_start(self, task):
+        """Print out task header for handlers
+
+Rather than print out a header for every handler, we print a dot
+character to indicate a handler task has been started.
+"""
+        sys.stdout.write('.')
+
+        self.banner("RUNNING HANDLER [%s]" % task.get_name().strip())
+
+    # pylint: disable=unused-argument,no-self-use
+    def v2_playbook_on_cleanup_task_start(self, task):
+        """Print out a task header for cleanup tasks
+
+Rather than print out a header for every cleanup task, we print a dot
+character to indicate a cleanup task has been started.
+""" +        sys.stdout.write('.') + +        self.banner("CLEANUP TASK [%s]" % task.get_name().strip()) + +    def v2_playbook_on_include(self, included_file): +        """Print out paths to statically included files""" +        msg = 'included: %s for %s' % (included_file._filename, ", ".join([h.name for h in included_file._hosts])) +        self._display.display(msg, color=C.COLOR_SKIP, log_only=True) + +    def v2_runner_on_ok(self, result): +        """This prints out task results in a fancy format + +The only thing we change here is adding `log_only=True` to the +.display() call +        """ +        delegated_vars = result._result.get('_ansible_delegated_vars', None) +        self._clean_results(result._result, result._task.action) +        if result._task.action in ('include', 'include_role'): +            return +        elif result._result.get('changed', False): +            if delegated_vars: +                msg = "changed: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host']) +            else: +                msg = "changed: [%s]" % result._host.get_name() +            color = C.COLOR_CHANGED +        else: +            if delegated_vars: +                msg = "ok: [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host']) +            else: +                msg = "ok: [%s]" % result._host.get_name() +            color = C.COLOR_OK + +        if result._task.loop and 'results' in result._result: +            self._process_items(result) +        else: + +            if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result: +                msg += " => %s" % (self._dump_results(result._result),) +            self._display.display(msg, color=color, log_only=True) + +        self._handle_warnings(result._result) + +    def v2_runner_item_on_ok(self, result): +        """Print out task results for items you're iterating over""" +        
delegated_vars = result._result.get('_ansible_delegated_vars', None) +        if result._task.action in ('include', 'include_role'): +            return +        elif result._result.get('changed', False): +            msg = 'changed' +            color = C.COLOR_CHANGED +        else: +            msg = 'ok' +            color = C.COLOR_OK + +        if delegated_vars: +            msg += ": [%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host']) +        else: +            msg += ": [%s]" % result._host.get_name() + +        msg += " => (item=%s)" % (self._get_item(result._result),) + +        if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result: +            msg += " => %s" % self._dump_results(result._result) +        self._display.display(msg, color=color, log_only=True) + +    def v2_runner_item_on_skipped(self, result): +        """Print out task results when an item is skipped""" +        if C.DISPLAY_SKIPPED_HOSTS: +            msg = "skipping: [%s] => (item=%s) " % (result._host.get_name(), self._get_item(result._result)) +            if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result: +                msg += " => %s" % self._dump_results(result._result) +            self._display.display(msg, color=C.COLOR_SKIP, log_only=True) + +    def v2_runner_on_skipped(self, result): +        """Print out task results when a task (or something else?) 
is skipped""" +        if C.DISPLAY_SKIPPED_HOSTS: +            if result._task.loop and 'results' in result._result: +                self._process_items(result) +            else: +                msg = "skipping: [%s]" % result._host.get_name() +                if (self._display.verbosity > 0 or '_ansible_verbose_always' in result._result) and '_ansible_verbose_override' not in result._result: +                    msg += " => %s" % self._dump_results(result._result) +                self._display.display(msg, color=C.COLOR_SKIP, log_only=True) + +    def v2_playbook_on_notify(self, res, handler): +        """What happens when a task result is 'changed' and the task has a +'notify' list attached. +        """ +        self._display.display("skipping: no hosts matched", color=C.COLOR_SKIP, log_only=True) + +    ###################################################################### +    # So we can bubble up errors to the top +    def v2_runner_on_failed(self, result, ignore_errors=False): +        """I guess this is when an entire task has failed?""" + +        if self._play.strategy == 'free' and self._last_task_banner != result._task._uuid: +            self._print_task_banner(result._task) + +        delegated_vars = result._result.get('_ansible_delegated_vars', None) +        if 'exception' in result._result: +            if self._display.verbosity < 3: +                # extract just the actual error message from the exception text +                error = result._result['exception'].strip().split('\n')[-1] +                msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error +            else: +                msg = "An exception occurred during task execution. 
The full traceback is:\n" + result._result['exception'] + +            self._display.display(msg, color=C.COLOR_ERROR) + +        if result._task.loop and 'results' in result._result: +            self._process_items(result) + +        else: +            if delegated_vars: +                self._display.display("fatal: [%s -> %s]: FAILED! => %s" % (result._host.get_name(), delegated_vars['ansible_host'], self._dump_results(result._result)), color=C.COLOR_ERROR) +            else: +                self._display.display("fatal: [%s]: FAILED! => %s" % (result._host.get_name(), self._dump_results(result._result)), color=C.COLOR_ERROR) + +        if ignore_errors: +            self._display.display("...ignoring", color=C.COLOR_SKIP) + +    def v2_runner_item_on_failed(self, result): +        """When an item in a task fails.""" +        delegated_vars = result._result.get('_ansible_delegated_vars', None) +        if 'exception' in result._result: +            if self._display.verbosity < 3: +                # extract just the actual error message from the exception text +                error = result._result['exception'].strip().split('\n')[-1] +                msg = "An exception occurred during task execution. To see the full traceback, use -vvv. The error was: %s" % error +            else: +                msg = "An exception occurred during task execution. 
The full traceback is:\n" + result._result['exception'] + +            self._display.display(msg, color=C.COLOR_ERROR) + +        msg = "failed: " +        if delegated_vars: +            msg += "[%s -> %s]" % (result._host.get_name(), delegated_vars['ansible_host']) +        else: +            msg += "[%s]" % (result._host.get_name()) + +        self._display.display(msg + " (item=%s) => %s" % (self._get_item(result._result), self._dump_results(result._result)), color=C.COLOR_ERROR) +        self._handle_warnings(result._result) + +    ###################################################################### +    def v2_playbook_on_stats(self, stats): +        """Print the final playbook run stats""" +        self._display.display("", screen_only=True) +        self.banner("PLAY RECAP") + +        hosts = sorted(stats.processed.keys()) +        for h in hosts: +            t = stats.summarize(h) + +            self._display.display( +                u"%s : %s %s %s %s" % ( +                    hostcolor(h, t), +                    colorize(u'ok', t['ok'], C.COLOR_OK), +                    colorize(u'changed', t['changed'], C.COLOR_CHANGED), +                    colorize(u'unreachable', t['unreachable'], C.COLOR_UNREACHABLE), +                    colorize(u'failed', t['failures'], C.COLOR_ERROR)), +                screen_only=True +            ) + +            self._display.display( +                u"%s : %s %s %s %s" % ( +                    hostcolor(h, t, False), +                    colorize(u'ok', t['ok'], None), +                    colorize(u'changed', t['changed'], None), +                    colorize(u'unreachable', t['unreachable'], None), +                    colorize(u'failed', t['failures'], None)), +                log_only=True +            ) + +        self._display.display("", screen_only=True) +        self._display.display("", screen_only=True) + +        # Some plays are conditional and won't run (such as load +        # balancers) if they aren't 
required. Sometimes plays are +        # conditionally included later in the run. Let the user know +        # about this to avoid potential confusion. +        if self.plays_total_ran != self.plays_count: +            print("Installation Complete: Note: Play count is only an estimate, some plays may have been skipped or dynamically added") +            self._display.display("", screen_only=True) diff --git a/roles/lib_utils/filter_plugins/oo_filters.py b/roles/lib_utils/filter_plugins/oo_filters.py new file mode 100644 index 000000000..a2ea287cf --- /dev/null +++ b/roles/lib_utils/filter_plugins/oo_filters.py @@ -0,0 +1,621 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +# pylint: disable=too-many-lines +""" +Custom filters for use in openshift-ansible +""" +import os +import pdb +import random +import re + +from base64 import b64encode +from collections import Mapping +# pylint no-name-in-module and import-error disabled here because pylint +# fails to properly detect the packages when installed in a virtualenv +from distutils.util import strtobool  # pylint:disable=no-name-in-module,import-error +from operator import itemgetter + +import yaml + +from ansible import errors +from ansible.parsing.yaml.dumper import AnsibleDumper + +# ansible.compat.six goes away with Ansible 2.4 +try: +    from ansible.compat.six import string_types, u +    from ansible.compat.six.moves.urllib.parse import urlparse +except ImportError: +    from ansible.module_utils.six import string_types, u +    from ansible.module_utils.six.moves.urllib.parse import urlparse + +HAS_OPENSSL = False +try: +    import OpenSSL.crypto +    HAS_OPENSSL = True +except ImportError: +    pass + + +# pylint: disable=C0103 + +def lib_utils_oo_pdb(arg): +    """ This pops you into a pdb instance where arg is the data passed in +        from the filter. 
+        Ex: "{{ hostvars | lib_utils_oo_pdb }}"
+    """
+    pdb.set_trace()
+    return arg
+
+
+def get_attr(data, attribute=None):
+    """ This looks up dictionary attributes of the form a.b.c and returns
+        the value.
+
+        If the key isn't present, None is returned.
+        Ex: data = {'a': {'b': {'c': 5}}}
+            attribute = "a.b.c"
+            returns 5
+    """
+    if not attribute:
+        raise errors.AnsibleFilterError("|failed expects attribute to be set")
+
+    ptr = data
+    for attr in attribute.split('.'):
+        if attr in ptr:
+            ptr = ptr[attr]
+        else:
+            ptr = None
+            break
+
+    return ptr
+
+
+def oo_flatten(data):
+    """ This filter plugin will flatten a list of lists
+    """
+    if not isinstance(data, list):
+        raise errors.AnsibleFilterError("|failed expects to flatten a List")
+
+    return [item for sublist in data for item in sublist]
+
+
+def lib_utils_oo_collect(data_list, attribute=None, filters=None):
+    """ This takes a list of dict and collects all attributes specified into a
+        list. If filter is specified then we will include all items that
+        match _ALL_ of filters.  If a dict entry is missing the key in a
+        filter it will be excluded from the match.
+        Ex: data_list = [ {'a':1, 'b':5, 'z': 'z'}, # True, return
+                          {'a':2, 'z': 'z'},        # True, return
+                          {'a':3, 'z': 'z'},        # True, return
+                          {'a':4, 'z': 'b'},        # FAILED, obj['z'] != filters['z']
+                        ]
+            attribute = 'a'
+            filters   = {'z': 'z'}
+            returns [1, 2, 3]
+
+        This also deals with lists of lists with dict as elements.
+        Ex: data_list = [ +                          [ {'a':1, 'b':5, 'z': 'z'}, # True, return +                            {'a':2, 'b':6, 'z': 'z'}  # True, return +                          ], +                          [ {'a':3, 'z': 'z'},        # True, return +                            {'a':4, 'z': 'b'}         # FAILED, obj['z'] != obj['z'] +                          ], +                          {'a':5, 'z': 'z'},          # True, return +                        ] +            attribute = 'a' +            filters   = {'z': 'z'} +            returns [1, 2, 3, 5] +    """ +    if not isinstance(data_list, list): +        raise errors.AnsibleFilterError("lib_utils_oo_collect expects to filter on a List") + +    if not attribute: +        raise errors.AnsibleFilterError("lib_utils_oo_collect expects attribute to be set") + +    data = [] +    retval = [] + +    for item in data_list: +        if isinstance(item, list): +            retval.extend(lib_utils_oo_collect(item, attribute, filters)) +        else: +            data.append(item) + +    if filters is not None: +        if not isinstance(filters, dict): +            raise errors.AnsibleFilterError( +                "lib_utils_oo_collect expects filter to be a dict") +        retval.extend([get_attr(d, attribute) for d in data if ( +            all([d.get(key, None) == filters[key] for key in filters]))]) +    else: +        retval.extend([get_attr(d, attribute) for d in data]) + +    retval = [val for val in retval if val is not None] + +    return retval + + +def lib_utils_oo_select_keys_from_list(data, keys): +    """ This returns a list, which contains the value portions for the keys +        Ex: data = { 'a':1, 'b':2, 'c':3 } +            keys = ['a', 'c'] +            returns [1, 3] +    """ + +    if not isinstance(data, list): +        raise errors.AnsibleFilterError("|lib_utils_oo_select_keys_from_list failed expects to filter on a list") + +    if not isinstance(keys, list): +        raise 
errors.AnsibleFilterError("|lib_utils_oo_select_keys_from_list failed expects first param is a list") + +    # Gather up the values for the list of keys passed in +    retval = [lib_utils_oo_select_keys(item, keys) for item in data] + +    return oo_flatten(retval) + + +def lib_utils_oo_select_keys(data, keys): +    """ This returns a list, which contains the value portions for the keys +        Ex: data = { 'a':1, 'b':2, 'c':3 } +            keys = ['a', 'c'] +            returns [1, 3] +    """ + +    if not isinstance(data, Mapping): +        raise errors.AnsibleFilterError("|lib_utils_oo_select_keys failed expects to filter on a dict or object") + +    if not isinstance(keys, list): +        raise errors.AnsibleFilterError("|lib_utils_oo_select_keys failed expects first param is a list") + +    # Gather up the values for the list of keys passed in +    retval = [data[key] for key in keys if key in data] + +    return retval + + +def lib_utils_oo_prepend_strings_in_list(data, prepend): +    """ This takes a list of strings and prepends a string to each item in the +        list +        Ex: data = ['cart', 'tree'] +            prepend = 'apple-' +            returns ['apple-cart', 'apple-tree'] +    """ +    if not isinstance(data, list): +        raise errors.AnsibleFilterError("|failed expects first param is a list") +    if not all(isinstance(x, string_types) for x in data): +        raise errors.AnsibleFilterError("|failed expects first param is a list" +                                        " of strings") +    retval = [prepend + s for s in data] +    return retval + + +def lib_utils_oo_dict_to_list_of_dict(data, key_title='key', value_title='value'): +    """Take a dict and arrange them as a list of dicts + +       Input data: +       {'region': 'infra', 'test_k': 'test_v'} + +       Return data: +       [{'key': 'region', 'value': 'infra'}, {'key': 'test_k', 'value': 'test_v'}] + +       Written for use of the oc_label module +    """ +    if not 
isinstance(data, dict): +        # pylint: disable=line-too-long +        raise errors.AnsibleFilterError("|failed expects first param is a dict. Got %s. Type: %s" % (str(data), str(type(data)))) + +    rval = [] +    for label in data.items(): +        rval.append({key_title: label[0], value_title: label[1]}) + +    return rval + + +def oo_ami_selector(data, image_name): +    """ This takes a list of amis and an image name and attempts to return +        the latest ami. +    """ +    if not isinstance(data, list): +        raise errors.AnsibleFilterError("|failed expects first param is a list") + +    if not data: +        return None +    else: +        if image_name is None or not image_name.endswith('_*'): +            ami = sorted(data, key=itemgetter('name'), reverse=True)[0] +            return ami['ami_id'] +        else: +            ami_info = [(ami, ami['name'].split('_')[-1]) for ami in data] +            ami = sorted(ami_info, key=itemgetter(1), reverse=True)[0][0] +            return ami['ami_id'] + + +def lib_utils_oo_split(string, separator=','): +    """ This splits the input string into a list. If the input string is +    already a list we will return it as is. +    """ +    if isinstance(string, list): +        return string +    return string.split(separator) + + +def lib_utils_oo_dict_to_keqv_list(data): +    """Take a dict and return a list of k=v pairs + +        Input data: +        {'a': 1, 'b': 2} + +        Return data: +        ['a=1', 'b=2'] +    """ +    return ['='.join(str(e) for e in x) for x in data.items()] + + +def lib_utils_oo_list_to_dict(lst, separator='='): +    """ This converts a list of ["k=v"] to a dictionary {k: v}. 
+    """ +    kvs = [i.split(separator) for i in lst] +    return {k: v for k, v in kvs} + + +def haproxy_backend_masters(hosts, port): +    """ This takes an array of dicts and returns an array of dicts +        to be used as a backend for the haproxy role +    """ +    servers = [] +    for idx, host_info in enumerate(hosts): +        server = dict(name="master%s" % idx) +        server_ip = host_info['openshift']['common']['ip'] +        server['address'] = "%s:%s" % (server_ip, port) +        server['opts'] = 'check' +        servers.append(server) +    return servers + + +# pylint: disable=too-many-branches +def lib_utils_oo_parse_named_certificates(certificates, named_certs_dir, internal_hostnames): +    """ Parses names from list of certificate hashes. + +        Ex: certificates = [{ "certfile": "/root/custom1.crt", +                              "keyfile": "/root/custom1.key", +                               "cafile": "/root/custom-ca1.crt" }, +                            { "certfile": "custom2.crt", +                              "keyfile": "custom2.key", +                              "cafile": "custom-ca2.crt" }] + +            returns [{ "certfile": "/etc/origin/master/named_certificates/custom1.crt", +                       "keyfile": "/etc/origin/master/named_certificates/custom1.key", +                       "cafile": "/etc/origin/master/named_certificates/custom-ca1.crt", +                       "names": [ "public-master-host.com", +                                  "other-master-host.com" ] }, +                     { "certfile": "/etc/origin/master/named_certificates/custom2.crt", +                       "keyfile": "/etc/origin/master/named_certificates/custom2.key", +                       "cafile": "/etc/origin/master/named_certificates/custom-ca-2.crt", +                       "names": [ "some-hostname.com" ] }] +    """ +    if not isinstance(named_certs_dir, string_types): +        raise errors.AnsibleFilterError("|failed expects 
named_certs_dir is str or unicode") + +    if not isinstance(internal_hostnames, list): +        raise errors.AnsibleFilterError("|failed expects internal_hostnames is list") + +    if not HAS_OPENSSL: +        raise errors.AnsibleFilterError("|missing OpenSSL python bindings") + +    for certificate in certificates: +        if 'names' in certificate.keys(): +            continue +        else: +            certificate['names'] = [] + +        if not os.path.isfile(certificate['certfile']) or not os.path.isfile(certificate['keyfile']): +            raise errors.AnsibleFilterError("|certificate and/or key does not exist '%s', '%s'" % +                                            (certificate['certfile'], certificate['keyfile'])) + +        try: +            st_cert = open(certificate['certfile'], 'rt').read() +            cert = OpenSSL.crypto.load_certificate(OpenSSL.crypto.FILETYPE_PEM, st_cert) +            certificate['names'].append(str(cert.get_subject().commonName.decode())) +            for i in range(cert.get_extension_count()): +                if cert.get_extension(i).get_short_name() == 'subjectAltName': +                    for name in str(cert.get_extension(i)).replace('DNS:', '').split(', '): +                        certificate['names'].append(name) +        except Exception: +            raise errors.AnsibleFilterError(("|failed to parse certificate '%s', " % certificate['certfile'] + +                                             "please specify certificate names in host inventory")) + +        certificate['names'] = list(set(certificate['names'])) +        if 'cafile' not in certificate: +            certificate['names'] = [name for name in certificate['names'] if name not in internal_hostnames] +            if not certificate['names']: +                raise errors.AnsibleFilterError(("|failed to parse certificate '%s' or " % certificate['certfile'] + +                                                 "detected a collision with internal hostname, 
please specify " + +                                                 "certificate names in host inventory")) + +    for certificate in certificates: +        # Update paths for configuration +        certificate['certfile'] = os.path.join(named_certs_dir, os.path.basename(certificate['certfile'])) +        certificate['keyfile'] = os.path.join(named_certs_dir, os.path.basename(certificate['keyfile'])) +        if 'cafile' in certificate: +            certificate['cafile'] = os.path.join(named_certs_dir, os.path.basename(certificate['cafile'])) +    return certificates + + +def lib_utils_oo_generate_secret(num_bytes): +    """ generate a session secret """ + +    if not isinstance(num_bytes, int): +        raise errors.AnsibleFilterError("|failed expects num_bytes is int") + +    return b64encode(os.urandom(num_bytes)).decode('utf-8') + + +def lib_utils_to_padded_yaml(data, level=0, indent=2, **kw): +    """ returns a yaml snippet padded to match the indent level you specify """ +    if data in [None, ""]: +        return "" + +    try: +        transformed = u(yaml.dump(data, indent=indent, allow_unicode=True, +                                  default_flow_style=False, +                                  Dumper=AnsibleDumper, **kw)) +        padded = "\n".join([" " * level * indent + line for line in transformed.splitlines()]) +        return "\n{0}".format(padded) +    except Exception as my_e: +        raise errors.AnsibleFilterError('Failed to convert: %s' % my_e) + + +def lib_utils_oo_pods_match_component(pods, deployment_type, component): +    """ Filters a list of Pods and returns the ones matching the deployment_type and component +    """ +    if not isinstance(pods, list): +        raise errors.AnsibleFilterError("failed expects to filter on a list") +    if not isinstance(deployment_type, string_types): +        raise errors.AnsibleFilterError("failed expects deployment_type to be a string") +    if not isinstance(component, string_types): +        raise 
errors.AnsibleFilterError("failed expects component to be a string") + +    image_prefix = 'openshift/origin-' +    if deployment_type == 'openshift-enterprise': +        image_prefix = 'openshift3/ose-' + +    matching_pods = [] +    image_regex = image_prefix + component + r'.*' +    for pod in pods: +        for container in pod['spec']['containers']: +            if re.search(image_regex, container['image']): +                matching_pods.append(pod) +                break  # stop here, don't add a pod more than once + +    return matching_pods + + +def lib_utils_oo_image_tag_to_rpm_version(version, include_dash=False): +    """ Convert an image tag string to an RPM version if necessary +        Empty strings and strings that are already in rpm version format +        are ignored. Also remove non semantic version components. + +        Ex. v3.2.0.10 -> -3.2.0.10 +            v1.2.0-rc1 -> -1.2.0 +    """ +    if not isinstance(version, string_types): +        raise errors.AnsibleFilterError("|failed expects a string or unicode") +    if version.startswith("v"): +        version = version[1:] +        # Strip release from requested version, we no longer support this. 
+        version = version.split('-')[0] + +    if include_dash and version and not version.startswith("-"): +        version = "-" + version + +    return version + + +def lib_utils_oo_hostname_from_url(url): +    """ Returns the hostname contained in a URL + +        Ex: https://ose3-master.example.com/v1/api -> ose3-master.example.com +    """ +    if not isinstance(url, string_types): +        raise errors.AnsibleFilterError("|failed expects a string or unicode") +    parse_result = urlparse(url) +    if parse_result.netloc != '': +        return parse_result.netloc +    else: +        # netloc wasn't parsed, assume url was missing scheme and path +        return parse_result.path + + +# pylint: disable=invalid-name, unused-argument +def lib_utils_oo_loadbalancer_frontends( +        api_port, servers_hostvars, use_nuage=False, nuage_rest_port=None): +    """TODO: Document me.""" +    loadbalancer_frontends = [{'name': 'atomic-openshift-api', +                               'mode': 'tcp', +                               'options': ['tcplog'], +                               'binds': ["*:{0}".format(api_port)], +                               'default_backend': 'atomic-openshift-api'}] +    if bool(strtobool(str(use_nuage))) and nuage_rest_port is not None: +        loadbalancer_frontends.append({'name': 'nuage-monitor', +                                       'mode': 'tcp', +                                       'options': ['tcplog'], +                                       'binds': ["*:{0}".format(nuage_rest_port)], +                                       'default_backend': 'nuage-monitor'}) +    return loadbalancer_frontends + + +# pylint: disable=invalid-name +def lib_utils_oo_loadbalancer_backends( +        api_port, servers_hostvars, use_nuage=False, nuage_rest_port=None): +    """TODO: Document me.""" +    loadbalancer_backends = [{'name': 'atomic-openshift-api', +                              'mode': 'tcp', +                              'option': 
'tcplog', +                              'balance': 'source', +                              'servers': haproxy_backend_masters(servers_hostvars, api_port)}] +    if bool(strtobool(str(use_nuage))) and nuage_rest_port is not None: +        # pylint: disable=line-too-long +        loadbalancer_backends.append({'name': 'nuage-monitor', +                                      'mode': 'tcp', +                                      'option': 'tcplog', +                                      'balance': 'source', +                                      'servers': haproxy_backend_masters(servers_hostvars, nuage_rest_port)}) +    return loadbalancer_backends + + +def lib_utils_oo_chomp_commit_offset(version): +    """Chomp any "+git.foo" commit offset string from the given `version` +    and return the modified version string. + +Ex: +- chomp_commit_offset(None)                 => None +- chomp_commit_offset(1337)                 => "1337" +- chomp_commit_offset("v3.4.0.15+git.derp") => "v3.4.0.15" +- chomp_commit_offset("v3.4.0.15")          => "v3.4.0.15" +- chomp_commit_offset("v1.3.0+52492b4")     => "v1.3.0" +    """ +    if version is None: +        return version +    else: +        # Stringify, just in case it's a Number type. Split by '+' and +        # return the first split. No concerns about strings without a +        # '+', .split() returns an array of the original string. +        return str(version).split('+')[0] + + +def lib_utils_oo_random_word(length, source='abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'): +    """Generates a random string of given length from a set of alphanumeric characters. 
+       The default source uses [a-z][A-Z][0-9] +       Ex: +       - lib_utils_oo_random_word(3)                => aB9 +       - lib_utils_oo_random_word(4, source='012')  => 0123 +    """ +    return ''.join(random.choice(source) for i in range(length)) + + +def lib_utils_oo_contains_rule(source, apiGroups, resources, verbs): +    '''Return true if the specified rule is contained within the provided source''' + +    rules = source['rules'] + +    if rules: +        for rule in rules: +            if set(rule['apiGroups']) == set(apiGroups): +                if set(rule['resources']) == set(resources): +                    if set(rule['verbs']) == set(verbs): +                        return True + +    return False + + +def lib_utils_oo_selector_to_string_list(user_dict): +    """Convert a dict of selectors to a key=value list of strings + +Given input of {'region': 'infra', 'zone': 'primary'} returns a list +of items as ['region=infra', 'zone=primary'] +    """ +    selectors = [] +    for key in user_dict: +        selectors.append("{}={}".format(key, user_dict[key])) +    return selectors + + +def lib_utils_oo_filter_sa_secrets(sa_secrets, secret_hint='-token-'): +    """Parse the Service Account Secrets list, `sa_secrets`, (as from +oc_serviceaccount_secret:state=list) and return the name of the secret +containing the `secret_hint` string. For example, by default this will +return the name of the secret holding the SA bearer token. + +Only provide the 'results' object to this filter. This filter expects +to receive a list like this: + +    [ +        { +            "name": "management-admin-dockercfg-p31s2" +        }, +        { +            "name": "management-admin-token-bnqsh" +        } +    ] + + +Returns: + +* `secret_name` [string] - The name of the secret matching the +  `secret_hint` parameter. By default this is the secret holding the +  SA's bearer token. 
+ +Example playbook usage: + +Register a return value from oc_serviceaccount_secret with and pass +that result to this filter plugin. + +    - name: Get all SA Secrets +      oc_serviceaccount_secret: +        state: list +        service_account: management-admin +        namespace: management-infra +      register: sa + +    - name: Save the SA bearer token secret name +      set_fact: +        management_token: "{{ sa.results | lib_utils_oo_filter_sa_secrets }}" + +    - name: Get the SA bearer token value +      oc_secret: +        state: list +        name: "{{ management_token }}" +        namespace: management-infra +        decode: true +      register: sa_secret + +    - name: Print the bearer token value +      debug: +        var: sa_secret.results.decoded.token + +    """ +    secret_name = None + +    for secret in sa_secrets: +        # each secret is a hash +        if secret['name'].find(secret_hint) == -1: +            continue +        else: +            secret_name = secret['name'] +            break + +    return secret_name + + +class FilterModule(object): +    """ Custom ansible filter mapping """ + +    # pylint: disable=no-self-use, too-few-public-methods +    def filters(self): +        """ returns a mapping of filters to methods """ +        return { +            "lib_utils_oo_select_keys": lib_utils_oo_select_keys, +            "lib_utils_oo_select_keys_from_list": lib_utils_oo_select_keys_from_list, +            "lib_utils_oo_chomp_commit_offset": lib_utils_oo_chomp_commit_offset, +            "lib_utils_oo_collect": lib_utils_oo_collect, +            "lib_utils_oo_pdb": lib_utils_oo_pdb, +            "lib_utils_oo_prepend_strings_in_list": lib_utils_oo_prepend_strings_in_list, +            "lib_utils_oo_dict_to_list_of_dict": lib_utils_oo_dict_to_list_of_dict, +            "lib_utils_oo_split": lib_utils_oo_split, +            "lib_utils_oo_dict_to_keqv_list": lib_utils_oo_dict_to_keqv_list, +            "lib_utils_oo_list_to_dict": 
lib_utils_oo_list_to_dict, +            "lib_utils_oo_parse_named_certificates": lib_utils_oo_parse_named_certificates, +            "lib_utils_oo_generate_secret": lib_utils_oo_generate_secret, +            "lib_utils_oo_pods_match_component": lib_utils_oo_pods_match_component, +            "lib_utils_oo_image_tag_to_rpm_version": lib_utils_oo_image_tag_to_rpm_version, +            "lib_utils_oo_hostname_from_url": lib_utils_oo_hostname_from_url, +            "lib_utils_oo_loadbalancer_frontends": lib_utils_oo_loadbalancer_frontends, +            "lib_utils_oo_loadbalancer_backends": lib_utils_oo_loadbalancer_backends, +            "lib_utils_to_padded_yaml": lib_utils_to_padded_yaml, +            "lib_utils_oo_random_word": lib_utils_oo_random_word, +            "lib_utils_oo_contains_rule": lib_utils_oo_contains_rule, +            "lib_utils_oo_selector_to_string_list": lib_utils_oo_selector_to_string_list, +            "lib_utils_oo_filter_sa_secrets": lib_utils_oo_filter_sa_secrets, +        } diff --git a/roles/lib_utils/library/kubeclient_ca.py b/roles/lib_utils/library/kubeclient_ca.py new file mode 100644 index 000000000..a89a5574f --- /dev/null +++ b/roles/lib_utils/library/kubeclient_ca.py @@ -0,0 +1,88 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +''' kubeclient_ca ansible module ''' + +import base64 +import yaml +from ansible.module_utils.basic import AnsibleModule + + +DOCUMENTATION = ''' +--- +module: kubeclient_ca +short_description: Modify kubeclient certificate-authority-data +author: Andrew Butcher +requirements: [ ] +''' +EXAMPLES = ''' +- kubeclient_ca: +    client_path: /etc/origin/master/admin.kubeconfig +    ca_path: /etc/origin/master/ca-bundle.crt + +- slurp: +    src: /etc/origin/master/ca-bundle.crt +  register: ca_data +- kubeclient_ca: +    client_path: /etc/origin/master/admin.kubeconfig +    ca_data: "{{ ca_data.content }}" +''' + + +def main(): +    ''' Modify kubeconfig located at `client_path`, setting the +        certificate 
authority data to specified `ca_data` or contents of +        `ca_path`. +    ''' + +    module = AnsibleModule(  # noqa: F405 +        argument_spec=dict( +            client_path=dict(required=True), +            ca_data=dict(required=False, default=None), +            ca_path=dict(required=False, default=None), +            backup=dict(required=False, default=True, type='bool'), +        ), +        supports_check_mode=True, +        mutually_exclusive=[['ca_data', 'ca_path']], +        required_one_of=[['ca_data', 'ca_path']] +    ) + +    client_path = module.params['client_path'] +    ca_data = module.params['ca_data'] +    ca_path = module.params['ca_path'] +    backup = module.params['backup'] + +    try: +        with open(client_path) as client_config_file: +            client_config_data = yaml.safe_load(client_config_file.read()) + +        if ca_data is None: +            with open(ca_path) as ca_file: +                ca_data = base64.standard_b64encode(ca_file.read()) + +        changes = [] +        # Naively update the CA information for each cluster in the +        # kubeconfig. 
+        for cluster in client_config_data['clusters']: +            if cluster['cluster']['certificate-authority-data'] != ca_data: +                cluster['cluster']['certificate-authority-data'] = ca_data +                changes.append(cluster['name']) + +        if not module.check_mode: +            if len(changes) > 0 and backup: +                module.backup_local(client_path) + +            with open(client_path, 'w') as client_config_file: +                client_config_string = yaml.dump(client_config_data, default_flow_style=False) +                client_config_string = client_config_string.replace('\'\'', '""') +                client_config_file.write(client_config_string) + +        return module.exit_json(changed=(len(changes) > 0)) + +    # ignore broad-except error to avoid stack trace to ansible user +    # pylint: disable=broad-except +    except Exception as error: +        return module.fail_json(msg=str(error)) + + +if __name__ == '__main__': +    main() diff --git a/roles/lib_utils/library/modify_yaml.py b/roles/lib_utils/library/modify_yaml.py new file mode 100644 index 000000000..9b8f9ba33 --- /dev/null +++ b/roles/lib_utils/library/modify_yaml.py @@ -0,0 +1,117 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- +''' modify_yaml ansible module ''' + +import yaml + +# ignore pylint errors related to the module_utils import +# pylint: disable=redefined-builtin, unused-wildcard-import, wildcard-import +from ansible.module_utils.basic import *  # noqa: F402,F403 + + +DOCUMENTATION = ''' +--- +module: modify_yaml +short_description: Modify yaml key value pairs +author: Andrew Butcher +requirements: [ ] +''' +EXAMPLES = ''' +- modify_yaml: +    dest: /etc/origin/master/master-config.yaml +    yaml_key: 'kubernetesMasterConfig.masterCount' +    yaml_value: 2 +''' + + +def set_key(yaml_data, yaml_key, yaml_value): +    ''' Updates a parsed yaml structure setting a key to a value. + +        :param yaml_data: yaml structure to modify. 
+        :type yaml_data: dict +        :param yaml_key: Key to modify. +        :type yaml_key: mixed +        :param yaml_value: Value use for yaml_key. +        :type yaml_value: mixed +        :returns: Changes to the yaml_data structure +        :rtype: dict(tuple()) +    ''' +    changes = [] +    ptr = yaml_data +    final_key = yaml_key.split('.')[-1] +    for key in yaml_key.split('.'): +        # Key isn't present and we're not on the final key. Set to empty dictionary. +        if key not in ptr and key != final_key: +            ptr[key] = {} +            ptr = ptr[key] +        # Current key is the final key. Update value. +        elif key == final_key: +            if (key in ptr and module.safe_eval(ptr[key]) != yaml_value) or (key not in ptr):  # noqa: F405 +                ptr[key] = yaml_value +                changes.append((yaml_key, yaml_value)) +        else: +            # Next value is None and we're not on the final key. +            # Turn value into an empty dictionary. +            if ptr[key] is None and key != final_key: +                ptr[key] = {} +            ptr = ptr[key] +    return changes + + +def main(): +    ''' Modify key (supplied in jinja2 dot notation) in yaml file, setting +        the key to the desired value. 
+    ''' + +    # disabling pylint errors for global-variable-undefined and invalid-name +    # for 'global module' usage, since it is required to use ansible_facts +    # pylint: disable=global-variable-undefined, invalid-name, +    # redefined-outer-name +    global module + +    module = AnsibleModule(  # noqa: F405 +        argument_spec=dict( +            dest=dict(required=True), +            yaml_key=dict(required=True), +            yaml_value=dict(required=True), +            backup=dict(required=False, default=True, type='bool'), +        ), +        supports_check_mode=True, +    ) + +    dest = module.params['dest'] +    yaml_key = module.params['yaml_key'] +    yaml_value = module.safe_eval(module.params['yaml_value']) +    backup = module.params['backup'] + +    # Represent null values as an empty string. +    # pylint: disable=missing-docstring, unused-argument +    def none_representer(dumper, data): +        return yaml.ScalarNode(tag=u'tag:yaml.org,2002:null', value=u'') + +    yaml.add_representer(type(None), none_representer) + +    try: +        with open(dest) as yaml_file: +            yaml_data = yaml.safe_load(yaml_file.read()) + +        changes = set_key(yaml_data, yaml_key, yaml_value) + +        if len(changes) > 0: +            if backup: +                module.backup_local(dest) +            with open(dest, 'w') as yaml_file: +                yaml_string = yaml.dump(yaml_data, default_flow_style=False) +                yaml_string = yaml_string.replace('\'\'', '""') +                yaml_file.write(yaml_string) + +        return module.exit_json(changed=(len(changes) > 0), changes=changes) + +    # ignore broad-except error to avoid stack trace to ansible user +    # pylint: disable=broad-except +    except Exception as error: +        return module.fail_json(msg=str(error)) + + +if __name__ == '__main__': +    main() diff --git a/roles/lib_os_firewall/library/os_firewall_manage_iptables.py 
b/roles/lib_utils/library/os_firewall_manage_iptables.py index aeee3ede8..aeee3ede8 100755..100644 --- a/roles/lib_os_firewall/library/os_firewall_manage_iptables.py +++ b/roles/lib_utils/library/os_firewall_manage_iptables.py diff --git a/roles/lib_utils/library/rpm_q.py b/roles/lib_utils/library/rpm_q.py new file mode 100644 index 000000000..3dec50fc2 --- /dev/null +++ b/roles/lib_utils/library/rpm_q.py @@ -0,0 +1,72 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +# (c) 2015, Tobias Florek <tob@butter.sh> +# Licensed under the terms of the MIT License +""" +An ansible module to query the RPM database. For use, when yum/dnf are not +available. +""" + +# pylint: disable=redefined-builtin,wildcard-import,unused-wildcard-import +from ansible.module_utils.basic import *  # noqa: F403 + +DOCUMENTATION = """ +--- +module: rpm_q +short_description: Query the RPM database +author: Tobias Florek +options: +  name: +    description: +    - The name of the package to query +    required: true +  state: +    description: +    - Whether the package is supposed to be installed or not +    choices: [present, absent] +    default: present +""" + +EXAMPLES = """ +- rpm_q: name=ansible state=present +- rpm_q: name=ansible state=absent +""" + +RPM_BINARY = '/bin/rpm' + + +def main(): +    """ +    Checks rpm -q for the named package and returns the installed packages +    or None if not installed. 
+    """ +    module = AnsibleModule(  # noqa: F405 +        argument_spec=dict( +            name=dict(required=True), +            state=dict(default='present', choices=['present', 'absent']) +        ), +        supports_check_mode=True +    ) + +    name = module.params['name'] +    state = module.params['state'] + +    # pylint: disable=invalid-name +    rc, out, err = module.run_command([RPM_BINARY, '-q', name]) + +    installed = out.rstrip('\n').split('\n') + +    if rc != 0: +        if state == 'present': +            module.fail_json(msg="%s is not installed" % name, stdout=out, stderr=err, rc=rc) +        else: +            module.exit_json(changed=False) +    elif state == 'present': +        module.exit_json(changed=False, installed_versions=installed) +    else: +        module.fail_json(msg="%s is installed", installed_versions=installed) + + +if __name__ == '__main__': +    main() diff --git a/roles/nickhammond.logrotate/tasks/main.yml b/roles/nickhammond.logrotate/tasks/main.yml index 677f206ea..50ad7e373 100644 --- a/roles/nickhammond.logrotate/tasks/main.yml +++ b/roles/nickhammond.logrotate/tasks/main.yml @@ -1,7 +1,7 @@  ---  - name: nickhammond.logrotate | Install logrotate    package: name=logrotate state=present -  when: not openshift.common.is_atomic | bool +  when: not openshift_is_atomic | bool    register: result    until: result is succeeded diff --git a/roles/nuage_ca/meta/main.yml b/roles/nuage_ca/meta/main.yml index 36838debc..0d0b8d1a5 100644 --- a/roles/nuage_ca/meta/main.yml +++ b/roles/nuage_ca/meta/main.yml @@ -13,4 +13,4 @@ galaxy_info:    - cloud    - system  dependencies: -- { role: nuage_common } +- role: nuage_common diff --git a/roles/nuage_ca/tasks/main.yaml b/roles/nuage_ca/tasks/main.yaml index d96d0d802..cb7844bc5 100644 --- a/roles/nuage_ca/tasks/main.yaml +++ b/roles/nuage_ca/tasks/main.yaml @@ -1,7 +1,7 @@  ---  - name: Install openssl    package: name=openssl state=present -  when: not openshift.common.is_atomic | 
bool +  when: not openshift_is_atomic | bool    register: result    until: result is succeeded diff --git a/roles/nuage_common/tasks/main.yml b/roles/nuage_common/tasks/main.yml index 6c8c9f8d2..ec42518ff 100644 --- a/roles/nuage_common/tasks/main.yml +++ b/roles/nuage_common/tasks/main.yml @@ -2,17 +2,17 @@  - name: Set the Nuage plugin openshift directory fact to handle Atomic host install    set_fact:      nuage_node_plugin_dir: /var/usr/share/vsp-openshift -  when: openshift.common.is_atomic | bool +  when: openshift_is_atomic | bool  - name: Set the Nuage CNI network config directory fact to handle Atomic host install    set_fact:      nuage_node_cni_netconf_dir: /var/etc/cni/net.d/ -  when: openshift.common.is_atomic | bool +  when: openshift_is_atomic | bool  - name: Set the Nuage CNI binary directory fact to handle Atomic host install    set_fact:      nuage_node_cni_bin_dir: /var/opt/cni/bin/ -  when: openshift.common.is_atomic | bool +  when: openshift_is_atomic | bool  - name: Assure CNI plugin config dir exists before daemon set install    become: yes diff --git a/roles/nuage_master/meta/main.yml b/roles/nuage_master/meta/main.yml index e2f7af5ad..643800680 100644 --- a/roles/nuage_master/meta/main.yml +++ b/roles/nuage_master/meta/main.yml @@ -14,4 +14,4 @@ galaxy_info:    - system  dependencies:  - role: lib_openshift -- role: lib_os_firewall +- role: lib_utils diff --git a/roles/nuage_master/tasks/main.yaml b/roles/nuage_master/tasks/main.yaml index c264427de..29e16b6f8 100644 --- a/roles/nuage_master/tasks/main.yaml +++ b/roles/nuage_master/tasks/main.yaml @@ -5,22 +5,22 @@  - name: Set the Nuage certificate directory fact for Atomic hosts    set_fact:      cert_output_dir: /var/usr/share/nuage-openshift-monitor -  when: openshift.common.is_atomic | bool +  when: openshift_is_atomic | bool  - name: Set the Nuage kubeconfig file path fact for Atomic hosts    set_fact:      kube_config: /var/usr/share/nuage-openshift-monitor/nuage.kubeconfig -  when: 
openshift.common.is_atomic | bool +  when: openshift_is_atomic | bool  - name: Set the Nuage monitor yaml location fact for Atomic hosts    set_fact:      kubemon_yaml: /var/usr/share/nuage-openshift-monitor/nuage-openshift-monitor.yaml -  when: openshift.common.is_atomic | bool +  when: openshift_is_atomic | bool  - name: Set the Nuage monitor certs location fact for Atomic hosts    set_fact:      nuage_master_crt_dir: /var/usr/share/nuage-openshift-monitor/ -  when: openshift.common.is_atomic | bool +  when: openshift_is_atomic | bool  - name: Set the Nuage master config directory for daemon sets install    set_fact: @@ -35,27 +35,27 @@  - name: Set the Nuage CNI plugin binary directory for daemon sets install    set_fact:      nuage_cni_bin_dsets_mount_dir: /var/opt/cni/bin -  when: openshift.common.is_atomic | bool +  when: openshift_is_atomic | bool  - name: Create directory /usr/share/nuage-openshift-monitor    become: yes    file: path=/usr/share/nuage-openshift-monitor state=directory -  when: not openshift.common.is_atomic | bool +  when: not openshift_is_atomic | bool  - name: Create directory /var/usr/share/nuage-openshift-monitor    become: yes    file: path=/var/usr/share/nuage-openshift-monitor state=directory -  when: openshift.common.is_atomic | bool +  when: openshift_is_atomic | bool  - name: Create directory /var/usr/bin for monitor binary on atomic    become: yes    file: path=/var/usr/bin state=directory -  when: openshift.common.is_atomic | bool +  when: openshift_is_atomic | bool  - name: Create CNI bin directory /var/opt/cni/bin    become: yes    file: path=/var/opt/cni/bin state=directory -  when: openshift.common.is_atomic | bool +  when: openshift_is_atomic | bool  - name: Create the log directory    become: yes diff --git a/roles/nuage_master/tasks/serviceaccount.yml b/roles/nuage_master/tasks/serviceaccount.yml index fbf2c4f8d..9127b33d6 100644 --- a/roles/nuage_master/tasks/serviceaccount.yml +++ 
b/roles/nuage_master/tasks/serviceaccount.yml @@ -19,7 +19,7 @@  - name: Generate the node client config    command: > -    {{ openshift.common.client_binary }} adm create-api-client-config +    {{ openshift_client_binary }} adm create-api-client-config        --certificate-authority={{ openshift_master_ca_cert }}        --client-dir={{ cert_output_dir }}        --master={{ openshift.master.api_url }} diff --git a/roles/nuage_node/meta/main.yml b/roles/nuage_node/meta/main.yml index 9b0315054..0480502b7 100644 --- a/roles/nuage_node/meta/main.yml +++ b/roles/nuage_node/meta/main.yml @@ -15,4 +15,4 @@ galaxy_info:  dependencies:  - role: nuage_common  - role: nuage_ca -- role: lib_os_firewall +- role: lib_utils diff --git a/roles/nuage_node/tasks/main.yaml b/roles/nuage_node/tasks/main.yaml index c6b7a9b10..1f1bd1653 100644 --- a/roles/nuage_node/tasks/main.yaml +++ b/roles/nuage_node/tasks/main.yaml @@ -2,17 +2,17 @@  - name: Set the Nuage plugin openshift directory fact for Atomic hosts    set_fact:      vsp_openshift_dir: /var/usr/share/vsp-openshift -  when: openshift.common.is_atomic | bool +  when: openshift_is_atomic | bool  - name: Set the Nuage CNI binary directory fact for Atomic hosts    set_fact:      cni_bin_dir: /var/opt/cni/bin/ -  when: openshift.common.is_atomic | bool +  when: openshift_is_atomic | bool  - name: Set the Nuage plugin certs directory fact for Atomic hosts    set_fact:      nuage_plugin_crt_dir: /var/usr/share/vsp-openshift -  when: openshift.common.is_atomic | bool +  when: openshift_is_atomic | bool  - name: Assure CNI conf dir exists    become: yes @@ -36,7 +36,7 @@  - name: Add additional Docker mounts for Nuage for atomic hosts    become: yes    lineinfile: dest="{{ openshift_atomic_node_config_file }}" line="{{ nuage_atomic_docker_additional_mounts }}" -  when: openshift.common.is_atomic | bool +  when: openshift_is_atomic | bool  - name: Restart node services    command: /bin/true diff --git 
a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml index 74e5d1dde..71de24339 100644 --- a/roles/openshift_aws/defaults/main.yml +++ b/roles/openshift_aws/defaults/main.yml @@ -122,12 +122,25 @@ openshift_aws_ami_map:  openshift_aws_master_group:  - name: "{{ openshift_aws_clusterid }} master group"    group: master +  tags: +    host-type: master +    sub-host-type: default +    runtime: docker  openshift_aws_node_groups:  - name: "{{ openshift_aws_clusterid }} compute group"    group: compute +  tags: +    host-type: node +    sub-host-type: compute +    runtime: docker +  - name: "{{ openshift_aws_clusterid }} infra group"    group: infra +  tags: +    host-type: node +    sub-host-type: infra +    runtime: docker  openshift_aws_created_asgs: []  openshift_aws_current_asgs: [] @@ -144,10 +157,6 @@ openshift_aws_master_group_config:      min_size: 3      max_size: 3      desired_size: 3 -    tags: -      host-type: master -      sub-host-type: default -      runtime: docker      wait_for_instances: True      termination_policy: "{{ openshift_aws_node_group_termination_policy }}"      replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}" @@ -167,10 +176,6 @@ openshift_aws_node_group_config:      min_size: 3      max_size: 100      desired_size: 3 -    tags: -      host-type: node -      sub-host-type: compute -      runtime: docker      termination_policy: "{{ openshift_aws_node_group_termination_policy }}"      replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"      iam_role: "{{ openshift_aws_iam_role_name }}" @@ -186,10 +191,6 @@ openshift_aws_node_group_config:      min_size: 2      max_size: 20      desired_size: 2 -    tags: -      host-type: node -      sub-host-type: infra -      runtime: docker      termination_policy: "{{ openshift_aws_node_group_termination_policy }}"      replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"      iam_role: "{{ 
openshift_aws_iam_role_name }}" diff --git a/roles/openshift_aws/tasks/build_node_group.yml b/roles/openshift_aws/tasks/build_node_group.yml index 7fb617dd5..9485cc3ac 100644 --- a/roles/openshift_aws/tasks/build_node_group.yml +++ b/roles/openshift_aws/tasks/build_node_group.yml @@ -30,7 +30,7 @@  - name: query all asg's for this cluster    ec2_asg_facts:      region: "{{ openshift_aws_region }}" -    tags: "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid} | combine(l_node_group_config[openshift_aws_node_group.group].tags) }}" +    tags: "{{ {'kubernetes.io/cluster/' ~ openshift_aws_clusterid: openshift_aws_clusterid} | combine(openshift_aws_node_group.tags) }}"    register: asgs  - fail: diff --git a/roles/openshift_aws/tasks/scale_group.yml b/roles/openshift_aws/tasks/scale_group.yml index 3632f7ce9..6ce8c58ba 100644 --- a/roles/openshift_aws/tasks/scale_group.yml +++ b/roles/openshift_aws/tasks/scale_group.yml @@ -22,7 +22,7 @@                                      else (l_node_group_config[openshift_aws_node_group.group].replace_all_instances | default(omit)) }}"      tags:      - "{{ openshift_aws_node_group_config_tags -          | combine(l_node_group_config[openshift_aws_node_group.group].tags) +          | combine(openshift_aws_node_group.tags)            | combine({'deployment_serial': l_deployment_serial, 'ami': openshift_aws_ami_map[openshift_aws_node_group.group] | default(openshift_aws_ami)}) }}"  - name: append the asg name to the openshift_aws_created_asgs fact diff --git a/roles/openshift_builddefaults/meta/main.yml b/roles/openshift_builddefaults/meta/main.yml index 422d08400..60ac189a8 100644 --- a/roles/openshift_builddefaults/meta/main.yml +++ b/roles/openshift_builddefaults/meta/main.yml @@ -13,3 +13,4 @@ galaxy_info:    - cloud  dependencies:  - role: openshift_facts +- role: lib_utils diff --git a/roles/openshift_buildoverrides/meta/main.yml b/roles/openshift_buildoverrides/meta/main.yml index 
e9d2e8712..edca92e6f 100644 --- a/roles/openshift_buildoverrides/meta/main.yml +++ b/roles/openshift_buildoverrides/meta/main.yml @@ -13,3 +13,4 @@ galaxy_info:    - cloud  dependencies:  - role: openshift_facts +- role: lib_utils diff --git a/roles/openshift_ca/meta/main.yml b/roles/openshift_ca/meta/main.yml index 81b49ce60..b2081efc6 100644 --- a/roles/openshift_ca/meta/main.yml +++ b/roles/openshift_ca/meta/main.yml @@ -15,3 +15,4 @@ galaxy_info:  dependencies:  - role: openshift_cli  - role: openshift_facts +- role: lib_utils diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml index ea4702248..b94cd9fba 100644 --- a/roles/openshift_ca/tasks/main.yml +++ b/roles/openshift_ca/tasks/main.yml @@ -9,9 +9,9 @@  - name: Install the base package for admin tooling    package: -    name: "{{ openshift_service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}" +    name: "{{ openshift_service_type }}{{ openshift_pkg_version | default('') | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}"      state: present -  when: not openshift.common.is_containerized | bool +  when: not openshift_is_containerized | bool    register: install_result    until: install_result is succeeded    delegate_to: "{{ openshift_ca_host }}" @@ -41,7 +41,7 @@  - set_fact:      master_ca_missing: "{{ False in (g_master_ca_stat_result.results -                                     | oo_collect(attribute='stat.exists') +                                     | lib_utils_oo_collect(attribute='stat.exists')                                       | list) }}"    run_once: true @@ -87,11 +87,11 @@  # This should NOT replace the CA due to --overwrite=false when a CA already exists.  
- name: Create the master certificates if they do not already exist    command: > -    {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm ca create-master-certs -    {% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %} +    {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm ca create-master-certs +    {% for named_ca_certificate in openshift.master.named_certificates | default([]) | lib_utils_oo_collect('cafile') %}      --certificate-authority {{ named_ca_certificate }}      {% endfor %} -    {% for legacy_ca_certificate in g_master_legacy_ca_result.files | default([]) | oo_collect('path') %} +    {% for legacy_ca_certificate in g_master_legacy_ca_result.files | default([]) | lib_utils_oo_collect('path') %}      --certificate-authority {{ legacy_ca_certificate }}      {% endfor %}      --hostnames={{ hostvars[openshift_ca_host].openshift.common.all_hostnames | join(',') }} @@ -117,7 +117,7 @@        src: "{{ item }}"        dest: "{{ openshift_ca_clientconfig_tmpdir.stdout }}/"        remote_src: true -    with_items: "{{ g_master_legacy_ca_result.files | default([]) | oo_collect('path') }}" +    with_items: "{{ g_master_legacy_ca_result.files | default([]) | lib_utils_oo_collect('path') }}"      delegate_to: "{{ openshift_ca_host }}"      run_once: true    - copy: @@ -137,7 +137,7 @@  - name: Test local loopback context    command: > -    {{ hostvars[openshift_ca_host].openshift.common.client_binary }} config view +    {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} config view      --config={{ openshift_master_loopback_config }}    changed_when: false    register: loopback_config @@ -154,9 +154,9 @@      register: openshift_ca_loopback_tmpdir    - name: Generate the loopback master client config      command: > -      {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config +      {{ 
hostvars[openshift_ca_host]['first_master_client_binary'] }} adm create-api-client-config          --certificate-authority={{ openshift_ca_cert }} -        {% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %} +        {% for named_ca_certificate in openshift.master.named_certificates | default([]) | lib_utils_oo_collect('cafile') %}          --certificate-authority {{ named_ca_certificate }}          {% endfor %}          --client-dir={{ openshift_ca_loopback_tmpdir.stdout }} diff --git a/roles/openshift_certificate_expiry/meta/main.yml b/roles/openshift_certificate_expiry/meta/main.yml index c13b29ba5..6758f5b36 100644 --- a/roles/openshift_certificate_expiry/meta/main.yml +++ b/roles/openshift_certificate_expiry/meta/main.yml @@ -13,4 +13,5 @@ galaxy_info:    categories:    - cloud    - system -dependencies: [] +dependencies: +- role: lib_utils diff --git a/roles/openshift_cli/meta/main.yml b/roles/openshift_cli/meta/main.yml index 5d2b6abed..e531543b9 100644 --- a/roles/openshift_cli/meta/main.yml +++ b/roles/openshift_cli/meta/main.yml @@ -13,3 +13,4 @@ galaxy_info:    - cloud  dependencies:  - role: openshift_facts +- role: lib_utils diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml index 68d82e436..37bed9dbe 100644 --- a/roles/openshift_cli/tasks/main.yml +++ b/roles/openshift_cli/tasks/main.yml @@ -1,7 +1,7 @@  ---  - name: Install clients    package: name={{ openshift_service_type }}-clients{{ openshift_pkg_version | default('') }} state=present -  when: not openshift.common.is_containerized | bool +  when: not openshift_is_containerized | bool    register: result    until: result is succeeded @@ -18,7 +18,7 @@        tag: "{{ openshift_image_tag }}"        backend: "docker"    when: -  - openshift.common.is_containerized | bool +  - openshift_is_containerized | bool    - not l_use_cli_atomic_image | bool  - block: @@ -34,7 +34,7 @@        tag: "{{ openshift_image_tag 
}}"        backend: "atomic"    when: -  - openshift.common.is_containerized | bool +  - openshift_is_containerized | bool    - l_use_cli_atomic_image | bool  - name: Reload facts to pick up installed OpenShift version @@ -42,6 +42,6 @@  - name: Install bash completion for oc tools    package: name=bash-completion state=present -  when: not openshift.common.is_containerized | bool +  when: not openshift_is_containerized | bool    register: result    until: result is succeeded diff --git a/roles/openshift_cloud_provider/meta/main.yml b/roles/openshift_cloud_provider/meta/main.yml index 8ab95bf5a..e49cc4430 100644 --- a/roles/openshift_cloud_provider/meta/main.yml +++ b/roles/openshift_cloud_provider/meta/main.yml @@ -13,3 +13,4 @@ galaxy_info:    - cloud  dependencies:  - role: openshift_facts +- role: lib_utils diff --git a/roles/openshift_cluster_autoscaler/meta/main.yml b/roles/openshift_cluster_autoscaler/meta/main.yml index d2bbd2576..543eb6fed 100644 --- a/roles/openshift_cluster_autoscaler/meta/main.yml +++ b/roles/openshift_cluster_autoscaler/meta/main.yml @@ -1,3 +1,4 @@  ---  dependencies:  - lib_openshift +- role: lib_utils diff --git a/roles/openshift_daemonset_config/defaults/main.yml b/roles/openshift_daemonset_config/defaults/main.yml new file mode 100644 index 000000000..ebe5671d2 --- /dev/null +++ b/roles/openshift_daemonset_config/defaults/main.yml @@ -0,0 +1,19 @@ +--- +openshift_daemonset_config_namespace: openshift-node +openshift_daemonset_config_daemonset_name: ops-node-config +openshift_daemonset_config_configmap_name: "{{ openshift_daemonset_config_daemonset_name }}" +openshift_daemonset_config_node_selector: +  config: config +openshift_daemonset_config_sa_name: ops +openshift_daemonset_config_configmap_files: {} +openshift_daemonset_config_configmap_literals: {} +openshift_daemonset_config_monitoring: False +openshift_daemonset_config_interval: 300 +openshift_daemonset_config_script: config.sh +openshift_daemonset_config_secret_name: 
operations-config-secret +openshift_daemonset_config_secrets: {} +openshift_daemonset_config_runasuser: 0 +openshift_daemonset_config_privileged: True +openshift_daemonset_config_resources: +  cpu: 10m +  memory: 10Mi diff --git a/roles/openshift_daemonset_config/meta/main.yml b/roles/openshift_daemonset_config/meta/main.yml new file mode 100644 index 000000000..d2bbd2576 --- /dev/null +++ b/roles/openshift_daemonset_config/meta/main.yml @@ -0,0 +1,3 @@ +--- +dependencies: +- lib_openshift diff --git a/roles/openshift_daemonset_config/tasks/main.yml b/roles/openshift_daemonset_config/tasks/main.yml new file mode 100644 index 000000000..450cc9dca --- /dev/null +++ b/roles/openshift_daemonset_config/tasks/main.yml @@ -0,0 +1,58 @@ +--- +- name: add a sa +  oc_serviceaccount: +    name: "{{ openshift_daemonset_config_sa_name }}" +    namespace: "{{ openshift_daemonset_config_namespace }}" + +- name: add sa to privileged scc +  oc_adm_policy_user: +    namespace: "{{ openshift_daemonset_config_namespace }}" +    resource_kind: scc +    resource_name: privileged +    state: present +    user: "system:serviceaccount:{{ openshift_daemonset_config_namespace }}:{{ openshift_daemonset_config_sa_name }}" + +- name: copy template to disk +  template: +    dest: "/tmp/{{ item.name }}" +    src: "{{ item.name }}.j2" +  with_items: +  - name: daemonset.yml + +- name: copy files to disk +  copy: +    src: "{{ item.key }}" +    dest: "{{ item.value }}" +  with_dict: "{{ openshift_daemonset_config_configmap_files }}" + +- name: create the namespace +  oc_project: +    state: present +    name: "{{ openshift_daemonset_config_namespace }}" + +- name: lay down secrets +  oc_secret: +    state: present +    name: "{{ openshift_daemonset_config_secret_name }}" +    namespace: "{{ openshift_daemonset_config_namespace }}" +    delete_after: true +    contents: "{{ openshift_daemonset_config_secrets }}" +  when: +  - openshift_daemonset_config_secrets != {} + +- name: create the configmap + 
 oc_configmap: +    state: present +    name: "{{ openshift_daemonset_config_configmap_name }}" +    namespace: "{{ openshift_daemonset_config_namespace }}" +    from_literal: "{{ openshift_daemonset_config_configmap_literals }}" +    from_file: "{{ openshift_daemonset_config_configmap_files }}" + +- name: deploy daemonset +  oc_obj: +    state: present +    namespace: "{{ openshift_daemonset_config_namespace }}"  # openshift-node?? +    name: "{{ openshift_daemonset_config_daemonset_name }}" +    kind: daemonset +    files: +    - /tmp/daemonset.yml diff --git a/roles/openshift_daemonset_config/templates/daemonset.yml.j2 b/roles/openshift_daemonset_config/templates/daemonset.yml.j2 new file mode 100644 index 000000000..9792f6d16 --- /dev/null +++ b/roles/openshift_daemonset_config/templates/daemonset.yml.j2 @@ -0,0 +1,142 @@ +--- +kind: DaemonSet +apiVersion: extensions/v1beta1 +metadata: +  name: {{ openshift_daemonset_config_daemonset_name }} +  annotations: +    kubernetes.io/description: | +      This daemon set manages the operational configuration for a cluster and ensures all nodes have +      a concrete set of config in place. It could also use a local ansible run against the /host directory. 
+spec: +  selector: +    matchLabels: +      app: {{ openshift_daemonset_config_daemonset_name }} +      confighosts: ops +      ops.openshift.io/role: operations +  updateStrategy: +    type: RollingUpdate +  template: +    metadata: +      labels: +        app: {{ openshift_daemonset_config_daemonset_name }} +        confighosts: ops +        ops.openshift.io/role: operations +      annotations: +        scheduler.alpha.kubernetes.io/critical-pod: '' +    spec: +{% if openshift_daemonset_config_node_selector is defined and openshift_daemonset_config_node_selector != {} %} +      nodeSelector: {{ openshift_daemonset_config_node_selector | to_json }} +{% endif %} +      serviceAccountName: {{ openshift_daemonset_config_sa_name }} +      hostNetwork: true +      hostPID: true +      hostIPC: true +      containers: +      - name: config +        image: centos:7 +        env: +        - name: RESYNC_INTERVAL +          value: "{{ openshift_daemonset_config_interval }}" +        command: +        - /bin/bash +        - -c +        - | +          #!/bin/sh +          set -o errexit + +          while true; do + +            # execute user defined script +            sh /opt/config/{{ openshift_daemonset_config_script }} + +            # sleep for ${RESYNC_INTERVAL} minutes, then loop. if we fail Kubelet will restart us again +            echo "Success, sleeping for ${RESYNC_INTERVAL}s" +            exec sleep ${RESYNC_INTERVAL} + +          # Return to perform the config +          done +        securityContext: +          # Must be root to modify host system +          runAsUser: {{ openshift_daemonset_config_runasuser }} +          # Permission could be reduced by selecting an appropriate SELinux policy that allows +          # us to update the named directories +          privileged: {{ openshift_daemonset_config_privileged }} +        volumeMounts: +        # Directory which contains the host volume. 
+        - mountPath: /host +          name: host +        # Our node configuration +        - mountPath: /opt/config +          name: config +{% if openshift_daemonset_config_secrets != {} %} +        # Our delivered secrets +        - mountPath: /opt/secrets +          name: secrets +{% endif %} +        resources: +          requests: +            cpu: {{ openshift_daemonset_config_resources.cpu }} +            memory: {{ openshift_daemonset_config_resources.memory }} +{% if openshift_daemonset_config_monitoring %} +      - name: monitoring +        image: openshifttools/oso-centos7-host-monitoring:latest +        securityContext: +          # Must be root to read content +          runAsUser: 0 +          privileged: true + +        volumeMounts: +        - mountPath: /host +          name: host +          readOnly: true +        - mountPath: /etc/localtime +          subPath: etc/localtime +          name: host +          readOnly: true +        - mountPath: /sys +          subPath: sys +          name: host +          readOnly: true +        - mountPath: /var/run/docker.sock +          subPath: var/run/docker.sock +          name: host +          readOnly: true +        - mountPath: /var/run/openvswitch +          subPath: var/run/openvswitch +          name: host +          readOnly: true +        - mountPath: /etc/origin +          subPath: etc/origin +          name: host +          readOnly: true +        - mountPath: /usr/bin/oc +          subPath: usr/bin/oc +          name: host +          readOnly: true +          name: host +          readOnly: true +        - mountPath: /host/var/cache/yum +          subPath: var/cache/yum +          name: host +        - mountPath: /container_setup/monitoring-config.yml +          subPath: monitoring-config.yaml +          name: config +        - mountPath: /opt/config +          name: config +        resources: +          requests: +            cpu: 10m +            memory: 10Mi +{% endif %} +      volumes: +      
- name: config +        configMap: +          name: {{ openshift_daemonset_config_configmap_name }} +{% if openshift_daemonset_config_secrets != {} %} +      - name: secrets +        secret: +          secretName: {{ openshift_daemonset_config_secret_name }} +{% endif %} +      - name: host +        hostPath: +          path: / diff --git a/roles/openshift_default_storage_class/meta/main.yml b/roles/openshift_default_storage_class/meta/main.yml index d7d57fe39..30671a59a 100644 --- a/roles/openshift_default_storage_class/meta/main.yml +++ b/roles/openshift_default_storage_class/meta/main.yml @@ -13,3 +13,4 @@ galaxy_info:    - cloud  dependencies:  - role: lib_openshift +- role: lib_utils diff --git a/roles/openshift_docker_gc/meta/main.yml b/roles/openshift_docker_gc/meta/main.yml index f88a7c533..c8472d8bc 100644 --- a/roles/openshift_docker_gc/meta/main.yml +++ b/roles/openshift_docker_gc/meta/main.yml @@ -11,3 +11,4 @@ galaxy_info:      - 7  dependencies:  - role: lib_openshift +- role: lib_utils diff --git a/roles/openshift_etcd/meta/main.yml b/roles/openshift_etcd/meta/main.yml index 0e28fec03..25ae6a936 100644 --- a/roles/openshift_etcd/meta/main.yml +++ b/roles/openshift_etcd/meta/main.yml @@ -14,3 +14,4 @@ galaxy_info:  dependencies:  - role: openshift_etcd_facts  - role: etcd +- role: lib_utils diff --git a/roles/openshift_etcd_client_certificates/meta/main.yml b/roles/openshift_etcd_client_certificates/meta/main.yml index fbc72c8a3..6c79d345c 100644 --- a/roles/openshift_etcd_client_certificates/meta/main.yml +++ b/roles/openshift_etcd_client_certificates/meta/main.yml @@ -11,4 +11,5 @@ galaxy_info:      - 7    categories:    - cloud -dependencies: [] +dependencies: +- role: lib_utils diff --git a/roles/openshift_etcd_facts/meta/main.yml b/roles/openshift_etcd_facts/meta/main.yml index 925aa9f92..5e64a8596 100644 --- a/roles/openshift_etcd_facts/meta/main.yml +++ b/roles/openshift_etcd_facts/meta/main.yml @@ -13,3 +13,4 @@ galaxy_info:    - cloud  
dependencies:  - role: openshift_facts +- role: lib_utils diff --git a/roles/openshift_etcd_facts/vars/main.yml b/roles/openshift_etcd_facts/vars/main.yml index 0c072b64a..9e635b34f 100644 --- a/roles/openshift_etcd_facts/vars/main.yml +++ b/roles/openshift_etcd_facts/vars/main.yml @@ -1,6 +1,6 @@  --- -etcd_is_containerized: "{{ openshift.common.is_containerized }}" -etcd_is_atomic: "{{ openshift.common.is_atomic }}" +etcd_is_containerized: "{{ openshift_is_containerized }}" +etcd_is_atomic: "{{ openshift_is_atomic }}"  etcd_hostname: "{{ openshift.common.hostname }}"  etcd_ip: "{{ openshift.common.ip }}"  etcd_cert_subdir: "etcd-{{ openshift.common.hostname }}" diff --git a/roles/openshift_examples/defaults/main.yml b/roles/openshift_examples/defaults/main.yml index e623b33f3..0a6e8f20c 100644 --- a/roles/openshift_examples/defaults/main.yml +++ b/roles/openshift_examples/defaults/main.yml @@ -8,7 +8,7 @@ openshift_examples_load_quickstarts: true  content_version: "{{ openshift.common.examples_content_version }}" -examples_base: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/examples" +examples_base: "{{ openshift.common.config_base if openshift_is_containerized | bool else '/usr/share/openshift' }}/examples"  image_streams_base: "{{ examples_base }}/image-streams"  centos_image_streams:    - "{{ image_streams_base }}/image-streams-centos7.json" diff --git a/roles/openshift_examples/meta/main.yml b/roles/openshift_examples/meta/main.yml index f3fe2dcbe..1a34c85fc 100644 --- a/roles/openshift_examples/meta/main.yml +++ b/roles/openshift_examples/meta/main.yml @@ -11,4 +11,5 @@ galaxy_info:      - 7    categories:    - cloud -dependencies: [] +dependencies: +- role: lib_utils diff --git a/roles/openshift_examples/tasks/main.yml b/roles/openshift_examples/tasks/main.yml index 356317431..a09a598bd 100644 --- a/roles/openshift_examples/tasks/main.yml +++ b/roles/openshift_examples/tasks/main.yml @@ -53,7 
+53,7 @@  # RHEL and Centos image streams are mutually exclusive  - name: Import RHEL streams    command: > -    {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ item }} +    {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ item }}    when: openshift_examples_load_rhel | bool    with_items:      - "{{ rhel_image_streams }}" @@ -63,7 +63,7 @@  - name: Import Centos Image streams    command: > -    {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ item }} +    {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ item }}    when: openshift_examples_load_centos | bool    with_items:      - "{{ centos_image_streams }}" @@ -73,7 +73,7 @@  - name: Import db templates    command: > -    {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ db_templates_base }} +    {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ db_templates_base }}    when: openshift_examples_load_db_templates | bool    register: oex_import_db_templates    failed_when: "'already exists' not in oex_import_db_templates.stderr and oex_import_db_templates.rc != 0" @@ -90,7 +90,7 @@      - "{{ quickstarts_base }}/django.json"  - name: Remove defunct quickstart templates from openshift namespace -  command: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift delete templates/{{ item }}" +  command: 
"{{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift delete templates/{{ item }}"    with_items:      - nodejs-example      - cakephp-example @@ -102,7 +102,7 @@  - name: Import quickstart-templates    command: > -    {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ quickstarts_base }} +    {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ quickstarts_base }}    when: openshift_examples_load_quickstarts | bool    register: oex_import_quickstarts    failed_when: "'already exists' not in oex_import_quickstarts.stderr and oex_import_quickstarts.rc != 0" @@ -116,7 +116,7 @@      - "{{ xpaas_templates_base }}/sso70-basic.json"  - name: Remove old xPaas templates from openshift namespace -  command: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift delete templates/{{ item }}" +  command: "{{ openshift_client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift delete templates/{{ item }}"    with_items:      - sso70-basic    register: oex_delete_old_xpaas_templates @@ -125,7 +125,7 @@  - name: Import xPaas image streams    command: > -    {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ xpaas_image_streams }} +    {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ xpaas_image_streams }}    when: openshift_examples_load_xpaas | bool    register: oex_import_xpaas_streams    failed_when: "'already exists' not in oex_import_xpaas_streams.stderr and oex_import_xpaas_streams.rc 
!= 0" @@ -133,7 +133,7 @@  - name: Import xPaas templates    command: > -    {{ openshift.common.client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ xpaas_templates_base }} +    {{ openshift_client_binary }} {{ openshift_examples_import_command }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig -n openshift -f {{ xpaas_templates_base }}    when: openshift_examples_load_xpaas | bool    register: oex_import_xpaas_templates    failed_when: "'already exists' not in oex_import_xpaas_templates.stderr and oex_import_xpaas_templates.rc != 0" diff --git a/roles/openshift_excluder/tasks/install.yml b/roles/openshift_excluder/tasks/install.yml index ad7c00d14..6532d7fe2 100644 --- a/roles/openshift_excluder/tasks/install.yml +++ b/roles/openshift_excluder/tasks/install.yml @@ -1,14 +1,14 @@  ---  - when: -  - not openshift.common.is_atomic | bool +  - not openshift_is_atomic | bool    - r_openshift_excluder_install_ran is not defined    block:    - name: Install docker excluder - yum      package: -      name: "{{ r_openshift_excluder_service_type }}-docker-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) +  '*' }}" +      name: "{{ r_openshift_excluder_service_type }}-docker-excluder{{ openshift_pkg_version | default('') | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) +  '*' }}"        state: "{{ r_openshift_excluder_docker_package_state }}"      when:      - r_openshift_excluder_enable_docker_excluder | bool @@ -23,7 +23,7 @@    # https://bugzilla.redhat.com/show_bug.cgi?id=1199432    - name: Install docker excluder - dnf      package: -      name: "{{ r_openshift_excluder_service_type }}-docker-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}" +      name: "{{ r_openshift_excluder_service_type }}-docker-excluder{{ openshift_pkg_version | default('') | 
lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}"        state: "{{ r_openshift_excluder_docker_package_state }}"      when:      - r_openshift_excluder_enable_docker_excluder | bool @@ -33,7 +33,7 @@    - name: Install openshift excluder - yum      package: -      name: "{{ r_openshift_excluder_service_type }}-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}" +      name: "{{ r_openshift_excluder_service_type }}-excluder{{ openshift_pkg_version | default('') | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) + '*' }}"        state: "{{ r_openshift_excluder_package_state }}"      when:      - r_openshift_excluder_enable_openshift_excluder | bool @@ -47,7 +47,7 @@    # https://bugzilla.redhat.com/show_bug.cgi?id=1199432    - name: Install openshift excluder - dnf      package: -      name: "{{ r_openshift_excluder_service_type }}-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}" +      name: "{{ r_openshift_excluder_service_type }}-excluder{{ openshift_pkg_version | default('') | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}"        state: "{{ r_openshift_excluder_package_state }}"      when:      - r_openshift_excluder_enable_openshift_excluder | bool diff --git a/roles/openshift_expand_partition/tasks/main.yml b/roles/openshift_expand_partition/tasks/main.yml index c7e21ba99..5ae863871 100644 --- a/roles/openshift_expand_partition/tasks/main.yml +++ b/roles/openshift_expand_partition/tasks/main.yml @@ -1,7 +1,7 @@  ---  - name: Ensure growpart is installed    package: name=cloud-utils-growpart state=present -  when: not openshift.common.is_containerized | bool +  when: not openshift_is_containerized | bool    register: result    until: result is succeeded @@ -10,7 +10,7 @@    register: has_growpart    failed_when: has_growpart.cr != 0 and 'package cloud-utils-growpart is not installed' not in has_growpart.stdout    
changed_when: false -  when: openshift.common.is_containerized | bool +  when: openshift_is_containerized | bool  - name: Grow the partitions    command: "growpart {{oep_drive}} {{oep_partition}}" diff --git a/roles/openshift_facts/defaults/main.yml b/roles/openshift_facts/defaults/main.yml index 53a3bc87e..980350d14 100644 --- a/roles/openshift_facts/defaults/main.yml +++ b/roles/openshift_facts/defaults/main.yml @@ -1,8 +1,13 @@  --- +openshift_client_binary: "{{ openshift_is_containerized | ternary('/usr/local/bin/oc', 'oc') }}" +  openshift_cli_image_dict:    origin: 'openshift/origin'    openshift-enterprise: 'openshift3/ose' +repoquery_cmd: "{{ (ansible_pkg_mgr == 'dnf') | ternary('dnf repoquery --latest-limit 1 -d 0', 'repoquery --plugins') }}" +repoquery_installed: "{{ (ansible_pkg_mgr == 'dnf') | ternary('dnf repoquery --latest-limit 1 -d 0 --disableexcludes=all --installed', 'repoquery --plugins --installed') }}" +  openshift_hosted_images_dict:    origin: 'openshift/origin-${component}:${version}'    openshift-enterprise: 'openshift3/ose-${component}:${version}' @@ -94,11 +99,6 @@ openshift_prometheus_alertbuffer_storage_access_modes:  openshift_prometheus_alertbuffer_storage_create_pv: True  openshift_prometheus_alertbuffer_storage_create_pvc: False - -openshift_router_selector: "region=infra" -openshift_hosted_router_selector: "{{ openshift_router_selector }}" -openshift_hosted_registry_selector: "{{ openshift_router_selector }}" -  openshift_service_type_dict:    origin: origin    openshift-enterprise: atomic-openshift diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py index a10ba9310..d659286dc 100755 --- a/roles/openshift_facts/library/openshift_facts.py +++ b/roles/openshift_facts/library/openshift_facts.py @@ -69,22 +69,6 @@ def migrate_common_facts(facts):      return facts -def migrate_node_facts(facts): -    """ Migrate facts from various roles into node """ -    params = { -        
'common': ('dns_ip'), -    } -    if 'node' not in facts: -        facts['node'] = {} -    # pylint: disable=consider-iterating-dictionary -    for role in params.keys(): -        if role in facts: -            for param in params[role]: -                if param in facts[role]: -                    facts['node'][param] = facts[role].pop(param) -    return facts - -  def migrate_admission_plugin_facts(facts):      """ Apply migrations for admission plugin facts """      if 'master' in facts: @@ -104,7 +88,6 @@ def migrate_local_facts(facts):      """ Apply migrations of local facts """      migrated_facts = copy.deepcopy(facts)      migrated_facts = migrate_common_facts(migrated_facts) -    migrated_facts = migrate_node_facts(migrated_facts)      migrated_facts = migrate_admission_plugin_facts(migrated_facts)      return migrated_facts @@ -536,8 +519,7 @@ def set_aggregate_facts(facts):  def set_deployment_facts_if_unset(facts):      """ Set Facts that vary based on deployment_type. This currently -        includes master.registry_url, node.registry_url, -        node.storage_plugin_deps +        includes master.registry_url          Args:              facts (dict): existing facts @@ -545,29 +527,17 @@ def set_deployment_facts_if_unset(facts):              dict: the facts dict updated with the generated deployment_type              facts      """ -    # disabled to avoid breaking up facts related to deployment type into -    # multiple methods for now. 
-    # pylint: disable=too-many-statements, too-many-branches -    for role in ('master', 'node'): -        if role in facts: -            deployment_type = facts['common']['deployment_type'] -            if 'registry_url' not in facts[role]: -                registry_url = 'openshift/origin-${component}:${version}' -                if deployment_type == 'openshift-enterprise': -                    registry_url = 'openshift3/ose-${component}:${version}' -                facts[role]['registry_url'] = registry_url -      if 'master' in facts:          deployment_type = facts['common']['deployment_type']          openshift_features = ['Builder', 'S2IBuilder', 'WebConsole']          if 'disabled_features' not in facts['master']:              if facts['common']['deployment_subtype'] == 'registry':                  facts['master']['disabled_features'] = openshift_features - -    if 'node' in facts: -        deployment_type = facts['common']['deployment_type'] -        if 'storage_plugin_deps' not in facts['node']: -            facts['node']['storage_plugin_deps'] = ['ceph', 'glusterfs', 'iscsi'] +        if 'registry_url' not in facts['master']: +            registry_url = 'openshift/origin-${component}:${version}' +            if deployment_type == 'openshift-enterprise': +                registry_url = 'openshift3/ose-${component}:${version}' +            facts['master']['registry_url'] = registry_url      return facts @@ -792,62 +762,6 @@ def get_current_config(facts):      return current_config -def build_kubelet_args(facts): -    """Build node kubelet_args - -In the node-config.yaml file, kubeletArgument sub-keys have their -values provided as a list. Hence the gratuitous use of ['foo'] below. 
-    """ -    cloud_cfg_path = os.path.join( -        facts['common']['config_base'], -        'cloudprovider') - -    # We only have to do this stuff on hosts that are nodes -    if 'node' in facts: -        # Any changes to the kubeletArguments parameter are stored -        # here first. -        kubelet_args = {} - -        if 'cloudprovider' in facts: -            # EVERY cloud is special <3 -            if 'kind' in facts['cloudprovider']: -                if facts['cloudprovider']['kind'] == 'aws': -                    kubelet_args['cloud-provider'] = ['aws'] -                    kubelet_args['cloud-config'] = [cloud_cfg_path + '/aws.conf'] -                if facts['cloudprovider']['kind'] == 'openstack': -                    kubelet_args['cloud-provider'] = ['openstack'] -                    kubelet_args['cloud-config'] = [cloud_cfg_path + '/openstack.conf'] -                if facts['cloudprovider']['kind'] == 'gce': -                    kubelet_args['cloud-provider'] = ['gce'] -                    kubelet_args['cloud-config'] = [cloud_cfg_path + '/gce.conf'] - -        # Automatically add node-labels to the kubeletArguments -        # parameter. See BZ1359848 for additional details. -        # -        # Ref: https://bugzilla.redhat.com/show_bug.cgi?id=1359848 -        if 'labels' in facts['node'] and isinstance(facts['node']['labels'], dict): -            # tl;dr: os_node_labels="{'foo': 'bar', 'a': 'b'}" turns -            # into ['foo=bar', 'a=b'] -            # -            # On the openshift_node_labels inventory variable we loop -            # over each key-value tuple (from .items()) and join the -            # key to the value with an '=' character, this produces a -            # list. -            # -            # map() seems to be returning an itertools.imap object -            # instead of a list. We cast it to a list ourselves. 
-            # pylint: disable=unnecessary-lambda -            labels_str = list(map(lambda x: '='.join(x), facts['node']['labels'].items())) -            if labels_str != '': -                kubelet_args['node-labels'] = labels_str - -        # If we've added items to the kubelet_args dict then we need -        # to merge the new items back into the main facts object. -        if kubelet_args != {}: -            facts = merge_facts({'node': {'kubelet_args': kubelet_args}}, facts, []) -    return facts - -  def build_controller_args(facts):      """ Build master controller_args """      cloud_cfg_path = os.path.join(facts['common']['config_base'], @@ -973,7 +887,7 @@ def get_openshift_version(facts):      if os.path.isfile('/usr/bin/openshift'):          _, output, _ = module.run_command(['/usr/bin/openshift', 'version'])  # noqa: F405          version = parse_openshift_version(output) -    elif 'common' in facts and 'is_containerized' in facts['common']: +    else:          version = get_container_openshift_version(facts)      # Handle containerized masters that have not yet been configured as a node. 
@@ -1364,72 +1278,7 @@ def set_container_facts_if_unset(facts):              dict: the facts dict updated with the generated containerization              facts      """ -    deployment_type = facts['common']['deployment_type'] -    if deployment_type == 'openshift-enterprise': -        master_image = 'openshift3/ose' -        node_image = 'openshift3/node' -        ovs_image = 'openshift3/openvswitch' -        pod_image = 'openshift3/ose-pod' -        router_image = 'openshift3/ose-haproxy-router' -        registry_image = 'openshift3/ose-docker-registry' -        deployer_image = 'openshift3/ose-deployer' -    else: -        master_image = 'openshift/origin' -        node_image = 'openshift/node' -        ovs_image = 'openshift/openvswitch' -        pod_image = 'openshift/origin-pod' -        router_image = 'openshift/origin-haproxy-router' -        registry_image = 'openshift/origin-docker-registry' -        deployer_image = 'openshift/origin-deployer' - -    facts['common']['is_atomic'] = os.path.isfile('/run/ostree-booted') - -    if 'is_containerized' not in facts['common']: -        facts['common']['is_containerized'] = facts['common']['is_atomic'] -    if 'pod_image' not in facts['common']: -        facts['common']['pod_image'] = pod_image -    if 'router_image' not in facts['common']: -        facts['common']['router_image'] = router_image -    if 'registry_image' not in facts['common']: -        facts['common']['registry_image'] = registry_image -    if 'deployer_image' not in facts['common']: -        facts['common']['deployer_image'] = deployer_image -    if 'master' in facts and 'master_image' not in facts['master']: -        facts['master']['master_image'] = master_image -        facts['master']['master_system_image'] = master_image -    if 'node' in facts: -        if 'node_image' not in facts['node']: -            facts['node']['node_image'] = node_image -            facts['node']['node_system_image'] = node_image -        if 'ovs_image' not in 
facts['node']: -            facts['node']['ovs_image'] = ovs_image -            facts['node']['ovs_system_image'] = ovs_image - -    if safe_get_bool(facts['common']['is_containerized']): -        facts['common']['client_binary'] = '/usr/local/bin/oc' - -    return facts - -def set_installed_variant_rpm_facts(facts): -    """ Set RPM facts of installed variant -        Args: -            facts (dict): existing facts -        Returns: -            dict: the facts dict updated with installed_variant_rpms -                          """ -    installed_rpms = [] -    for base_rpm in ['openshift', 'atomic-openshift', 'origin']: -        optional_rpms = ['master', 'node', 'clients', 'sdn-ovs'] -        variant_rpms = [base_rpm] + \ -                       ['{0}-{1}'.format(base_rpm, r) for r in optional_rpms] + \ -                       ['tuned-profiles-%s-node' % base_rpm] -        for rpm in variant_rpms: -            exit_code, _, _ = module.run_command(['rpm', '-q', rpm])  # noqa: F405 -            if exit_code == 0: -                installed_rpms.append(rpm) - -    facts['common']['installed_variant_rpms'] = installed_rpms      return facts @@ -1545,7 +1394,6 @@ class OpenShiftFacts(object):          facts = set_deployment_facts_if_unset(facts)          facts = set_sdn_facts_if_unset(facts, self.system_facts)          facts = set_container_facts_if_unset(facts) -        facts = build_kubelet_args(facts)          facts = build_controller_args(facts)          facts = build_api_server_args(facts)          facts = set_version_facts_if_unset(facts) @@ -1553,8 +1401,6 @@ class OpenShiftFacts(object):          facts = set_proxy_facts(facts)          facts = set_builddefaults_facts(facts)          facts = set_buildoverrides_facts(facts) -        if not safe_get_bool(facts['common']['is_containerized']): -            facts = set_installed_variant_rpm_facts(facts)          facts = set_nodename(facts)          return dict(openshift=facts) @@ -1582,7 +1428,6 @@ class 
OpenShiftFacts(object):                                    hostname=hostname,                                    public_hostname=hostname,                                    portal_net='172.30.0.0/16', -                                  client_binary='oc',                                    dns_domain='cluster.local',                                    config_base='/etc/origin') @@ -1607,10 +1452,7 @@ class OpenShiftFacts(object):                                        max_requests_inflight=500)          if 'node' in roles: -            defaults['node'] = dict(labels={}, annotations={}, -                                    iptables_sync_period='30s', -                                    local_quota_per_fsgroup="", -                                    set_node_ip=False) +            defaults['node'] = dict(labels={})          if 'cloudprovider' in roles:              defaults['cloudprovider'] = dict(kind=None) diff --git a/roles/openshift_health_checker/meta/main.yml b/roles/openshift_health_checker/meta/main.yml index bc8e7bdcf..b8a59ee14 100644 --- a/roles/openshift_health_checker/meta/main.yml +++ b/roles/openshift_health_checker/meta/main.yml @@ -1,3 +1,4 @@  ---  dependencies:  - role: openshift_facts +- role: lib_utils diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py index 4f91f6bb3..744b79c1a 100644 --- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py +++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py @@ -160,7 +160,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):                  required.add(self._registry_console_image(image_tag, image_info))          # images for containerized components -        if self.get_var("openshift", "common", "is_containerized"): +        if self.get_var("openshift_is_containerized"):              components = set()              if 
'oo_nodes_to_config' in host_groups:                  components.update(["node", "openvswitch"]) diff --git a/roles/openshift_health_checker/openshift_checks/etcd_traffic.py b/roles/openshift_health_checker/openshift_checks/etcd_traffic.py index 8b20ccb49..b56d2092b 100644 --- a/roles/openshift_health_checker/openshift_checks/etcd_traffic.py +++ b/roles/openshift_health_checker/openshift_checks/etcd_traffic.py @@ -20,8 +20,8 @@ class EtcdTraffic(OpenShiftCheck):          return super(EtcdTraffic, self).is_active() and valid_group_names and valid_version      def run(self): -        is_containerized = self.get_var("openshift", "common", "is_containerized") -        unit = "etcd_container" if is_containerized else "etcd" +        openshift_is_containerized = self.get_var("openshift_is_containerized") +        unit = "etcd_container" if openshift_is_containerized else "etcd"          log_matchers = [{              "start_regexp": r"Starting Etcd Server", diff --git a/roles/openshift_health_checker/openshift_checks/mixins.py b/roles/openshift_health_checker/openshift_checks/mixins.py index cfbdea303..567162be1 100644 --- a/roles/openshift_health_checker/openshift_checks/mixins.py +++ b/roles/openshift_health_checker/openshift_checks/mixins.py @@ -10,8 +10,8 @@ class NotContainerizedMixin(object):      def is_active(self):          """Only run on non-containerized hosts.""" -        is_containerized = self.get_var("openshift", "common", "is_containerized") -        return super(NotContainerizedMixin, self).is_active() and not is_containerized +        openshift_is_containerized = self.get_var("openshift_is_containerized") +        return super(NotContainerizedMixin, self).is_active() and not openshift_is_containerized  class DockerHostMixin(object): @@ -23,7 +23,7 @@ class DockerHostMixin(object):          """Only run on hosts that depend on Docker."""          group_names = set(self.get_var("group_names", default=[]))          needs_docker = set(["oo_nodes_to_config"]) 
-        if self.get_var("openshift.common.is_containerized"): +        if self.get_var("openshift_is_containerized"):              needs_docker.update(["oo_masters_to_config", "oo_etcd_to_config"])          return super(DockerHostMixin, self).is_active() and bool(group_names.intersection(needs_docker)) @@ -33,7 +33,7 @@ class DockerHostMixin(object):          (which would not be able to install but should already have them).          Returns: msg, failed          """ -        if self.get_var("openshift", "common", "is_atomic"): +        if self.get_var("openshift_is_atomic"):              return "", False          # NOTE: we would use the "package" module but it's actually an action plugin diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py index fc333dfd4..9fd6e049d 100644 --- a/roles/openshift_health_checker/test/docker_image_availability_test.py +++ b/roles/openshift_health_checker/test/docker_image_availability_test.py @@ -6,13 +6,8 @@ from openshift_checks.docker_image_availability import DockerImageAvailability,  @pytest.fixture()  def task_vars():      return dict( -        openshift=dict( -            common=dict( -                is_containerized=False, -                is_atomic=False, -            ), -            docker=dict(), -        ), +        openshift_is_atomic=False, +        openshift_is_containerized=False,          openshift_service_type='origin',          openshift_deployment_type='origin',          openshift_image_tag='', @@ -20,7 +15,7 @@ def task_vars():      ) -@pytest.mark.parametrize('deployment_type, is_containerized, group_names, expect_active', [ +@pytest.mark.parametrize('deployment_type, openshift_is_containerized, group_names, expect_active', [      ("invalid", True, [], False),      ("", True, [], False),      ("origin", False, [], False), @@ -30,20 +25,20 @@ def task_vars():      ("origin", True, ["nfs"], False),      
("openshift-enterprise", True, ["lb"], False),  ]) -def test_is_active(task_vars, deployment_type, is_containerized, group_names, expect_active): +def test_is_active(task_vars, deployment_type, openshift_is_containerized, group_names, expect_active):      task_vars['openshift_deployment_type'] = deployment_type -    task_vars['openshift']['common']['is_containerized'] = is_containerized +    task_vars['openshift_is_containerized'] = openshift_is_containerized      task_vars['group_names'] = group_names      assert DockerImageAvailability(None, task_vars).is_active() == expect_active -@pytest.mark.parametrize("is_containerized,is_atomic", [ +@pytest.mark.parametrize("openshift_is_containerized,openshift_is_atomic", [      (True, True),      (False, False),      (True, False),      (False, True),  ]) -def test_all_images_available_locally(task_vars, is_containerized, is_atomic): +def test_all_images_available_locally(task_vars, openshift_is_containerized, openshift_is_atomic):      def execute_module(module_name, module_args, *_):          if module_name == "yum":              return {} @@ -55,8 +50,8 @@ def test_all_images_available_locally(task_vars, is_containerized, is_atomic):              'images': [module_args['name']],          } -    task_vars['openshift']['common']['is_containerized'] = is_containerized -    task_vars['openshift']['common']['is_atomic'] = is_atomic +    task_vars['openshift_is_containerized'] = openshift_is_containerized +    task_vars['openshift_is_atomic'] = openshift_is_atomic      result = DockerImageAvailability(execute_module, task_vars).run()      assert not result.get('failed', False) @@ -172,7 +167,7 @@ def test_registry_availability(image, registries, connection_test_failed, skopeo      assert expect_registries_reached == check.reachable_registries -@pytest.mark.parametrize("deployment_type, is_containerized, groups, oreg_url, expected", [ +@pytest.mark.parametrize("deployment_type, openshift_is_containerized, groups, oreg_url, 
expected", [      (  # standard set of stuff required on nodes          "origin", False, ['oo_nodes_to_config'], "",          set([ @@ -232,14 +227,10 @@ def test_registry_availability(image, registries, connection_test_failed, skopeo      ),  ]) -def test_required_images(deployment_type, is_containerized, groups, oreg_url, expected): +def test_required_images(deployment_type, openshift_is_containerized, groups, oreg_url, expected):      task_vars = dict( -        openshift=dict( -            common=dict( -                is_containerized=is_containerized, -                is_atomic=False, -            ), -        ), +        openshift_is_containerized=openshift_is_containerized, +        openshift_is_atomic=False,          openshift_deployment_type=deployment_type,          group_names=groups,          oreg_url=oreg_url, @@ -287,11 +278,7 @@ def test_registry_console_image(task_vars, expected):  def test_containerized_etcd():      task_vars = dict( -        openshift=dict( -            common=dict( -                is_containerized=True, -            ), -        ), +        openshift_is_containerized=True,          openshift_deployment_type="origin",          group_names=['oo_etcd_to_config'],      ) diff --git a/roles/openshift_health_checker/test/docker_storage_test.py b/roles/openshift_health_checker/test/docker_storage_test.py index 8fa68c378..33a5dd90a 100644 --- a/roles/openshift_health_checker/test/docker_storage_test.py +++ b/roles/openshift_health_checker/test/docker_storage_test.py @@ -4,21 +4,21 @@ from openshift_checks import OpenShiftCheckException  from openshift_checks.docker_storage import DockerStorage -@pytest.mark.parametrize('is_containerized, group_names, is_active', [ +@pytest.mark.parametrize('openshift_is_containerized, group_names, is_active', [      (False, ["oo_masters_to_config", "oo_etcd_to_config"], False),      (False, ["oo_masters_to_config", "oo_nodes_to_config"], True),      (True, ["oo_etcd_to_config"], True),  ]) -def 
test_is_active(is_containerized, group_names, is_active): +def test_is_active(openshift_is_containerized, group_names, is_active):      task_vars = dict( -        openshift=dict(common=dict(is_containerized=is_containerized)), +        openshift_is_containerized=openshift_is_containerized,          group_names=group_names,      )      assert DockerStorage(None, task_vars).is_active() == is_active  def non_atomic_task_vars(): -    return {"openshift": {"common": {"is_atomic": False}}} +    return {"openshift_is_atomic": False}  @pytest.mark.parametrize('docker_info, failed, expect_msg', [ diff --git a/roles/openshift_health_checker/test/etcd_traffic_test.py b/roles/openshift_health_checker/test/etcd_traffic_test.py index a29dc166b..583c4c8dd 100644 --- a/roles/openshift_health_checker/test/etcd_traffic_test.py +++ b/roles/openshift_health_checker/test/etcd_traffic_test.py @@ -36,9 +36,7 @@ def test_log_matches_high_traffic_msg(group_names, matched, failed, extra_words)      task_vars = dict(          group_names=group_names, -        openshift=dict( -            common=dict(is_containerized=False), -        ), +        openshift_is_containerized=False,          openshift_service_type="origin"      ) @@ -50,15 +48,13 @@ def test_log_matches_high_traffic_msg(group_names, matched, failed, extra_words)      assert result.get("failed", False) == failed -@pytest.mark.parametrize('is_containerized,expected_unit_value', [ +@pytest.mark.parametrize('openshift_is_containerized,expected_unit_value', [      (False, "etcd"),      (True, "etcd_container"),  ]) -def test_systemd_unit_matches_deployment_type(is_containerized, expected_unit_value): +def test_systemd_unit_matches_deployment_type(openshift_is_containerized, expected_unit_value):      task_vars = dict( -        openshift=dict( -            common=dict(is_containerized=is_containerized), -        ) +        openshift_is_containerized=openshift_is_containerized      )      def execute_module(module_name, args, *_): diff 
--git a/roles/openshift_health_checker/test/mixins_test.py b/roles/openshift_health_checker/test/mixins_test.py index b1a41ca3c..b5d6f2e95 100644 --- a/roles/openshift_health_checker/test/mixins_test.py +++ b/roles/openshift_health_checker/test/mixins_test.py @@ -10,8 +10,8 @@ class NotContainerizedCheck(NotContainerizedMixin, OpenShiftCheck):  @pytest.mark.parametrize('task_vars,expected', [ -    (dict(openshift=dict(common=dict(is_containerized=False))), True), -    (dict(openshift=dict(common=dict(is_containerized=True))), False), +    (dict(openshift_is_containerized=False), True), +    (dict(openshift_is_containerized=True), False),  ])  def test_is_active(task_vars, expected):      assert NotContainerizedCheck(None, task_vars).is_active() == expected @@ -20,4 +20,4 @@ def test_is_active(task_vars, expected):  def test_is_active_missing_task_vars():      with pytest.raises(OpenShiftCheckException) as excinfo:          NotContainerizedCheck().is_active() -    assert 'is_containerized' in str(excinfo.value) +    assert 'openshift_is_containerized' in str(excinfo.value) diff --git a/roles/openshift_health_checker/test/ovs_version_test.py b/roles/openshift_health_checker/test/ovs_version_test.py index dd98ff4d8..0238f49d5 100644 --- a/roles/openshift_health_checker/test/ovs_version_test.py +++ b/roles/openshift_health_checker/test/ovs_version_test.py @@ -70,7 +70,7 @@ def test_ovs_package_version(openshift_release, expected_ovs_version):      assert result is return_value -@pytest.mark.parametrize('group_names,is_containerized,is_active', [ +@pytest.mark.parametrize('group_names,openshift_is_containerized,is_active', [      (['oo_masters_to_config'], False, True),      # ensure check is skipped on containerized installs      (['oo_masters_to_config'], True, False), @@ -82,9 +82,9 @@ def test_ovs_package_version(openshift_release, expected_ovs_version):      (['lb'], False, False),      (['nfs'], False, False),  ]) -def 
test_ovs_version_skip_when_not_master_nor_node(group_names, is_containerized, is_active): +def test_ovs_version_skip_when_not_master_nor_node(group_names, openshift_is_containerized, is_active):      task_vars = dict(          group_names=group_names, -        openshift=dict(common=dict(is_containerized=is_containerized)), +        openshift_is_containerized=openshift_is_containerized,      )      assert OvsVersion(None, task_vars).is_active() == is_active diff --git a/roles/openshift_health_checker/test/package_availability_test.py b/roles/openshift_health_checker/test/package_availability_test.py index a1e6e0879..52740093d 100644 --- a/roles/openshift_health_checker/test/package_availability_test.py +++ b/roles/openshift_health_checker/test/package_availability_test.py @@ -3,16 +3,16 @@ import pytest  from openshift_checks.package_availability import PackageAvailability -@pytest.mark.parametrize('pkg_mgr,is_containerized,is_active', [ +@pytest.mark.parametrize('pkg_mgr,openshift_is_containerized,is_active', [      ('yum', False, True),      ('yum', True, False),      ('dnf', True, False),      ('dnf', False, False),  ]) -def test_is_active(pkg_mgr, is_containerized, is_active): +def test_is_active(pkg_mgr, openshift_is_containerized, is_active):      task_vars = dict(          ansible_pkg_mgr=pkg_mgr, -        openshift=dict(common=dict(is_containerized=is_containerized)), +        openshift_is_containerized=openshift_is_containerized,      )      assert PackageAvailability(None, task_vars).is_active() == is_active diff --git a/roles/openshift_health_checker/test/package_version_test.py b/roles/openshift_health_checker/test/package_version_test.py index ea8e02b97..d2916f617 100644 --- a/roles/openshift_health_checker/test/package_version_test.py +++ b/roles/openshift_health_checker/test/package_version_test.py @@ -99,7 +99,7 @@ def test_docker_package_version(deployment_type, openshift_release, expected_doc      assert result == return_value 
-@pytest.mark.parametrize('group_names,is_containerized,is_active', [ +@pytest.mark.parametrize('group_names,openshift_is_containerized,is_active', [      (['oo_masters_to_config'], False, True),      # ensure check is skipped on containerized installs      (['oo_masters_to_config'], True, False), @@ -111,9 +111,9 @@ def test_docker_package_version(deployment_type, openshift_release, expected_doc      (['lb'], False, False),      (['nfs'], False, False),  ]) -def test_package_version_skip_when_not_master_nor_node(group_names, is_containerized, is_active): +def test_package_version_skip_when_not_master_nor_node(group_names, openshift_is_containerized, is_active):      task_vars = dict(          group_names=group_names, -        openshift=dict(common=dict(is_containerized=is_containerized)), +        openshift_is_containerized=openshift_is_containerized,      )      assert PackageVersion(None, task_vars).is_active() == is_active diff --git a/roles/openshift_hosted/meta/main.yml b/roles/openshift_hosted/meta/main.yml index ac9e241a5..ace2d15b0 100644 --- a/roles/openshift_hosted/meta/main.yml +++ b/roles/openshift_hosted/meta/main.yml @@ -14,4 +14,4 @@ galaxy_info:  dependencies:  - role: openshift_facts  - role: lib_openshift -- role: lib_os_firewall +- role: lib_utils diff --git a/roles/openshift_hosted/tasks/router.yml b/roles/openshift_hosted/tasks/router.yml index 4e9219477..8ecaacb4a 100644 --- a/roles/openshift_hosted/tasks/router.yml +++ b/roles/openshift_hosted/tasks/router.yml @@ -25,10 +25,10 @@      backup: True      dest: "/etc/origin/master/{{ item | basename }}"      src: "{{ item }}" -  with_items: "{{ openshift_hosted_routers | oo_collect(attribute='certificate') | -                  oo_select_keys_from_list(['keyfile', 'certfile', 'cafile']) }}" +  with_items: "{{ openshift_hosted_routers | lib_utils_oo_collect(attribute='certificate') | +                  lib_utils_oo_select_keys_from_list(['keyfile', 'certfile', 'cafile']) }}"    when: ( not 
openshift_hosted_router_create_certificate | bool ) or openshift_hosted_router_certificate != {} or -        (  openshift_hosted_routers | oo_collect(attribute='certificate') | oo_select_keys_from_list(['keyfile', 'certfile', 'cafile'])|length > 0 ) +        (  openshift_hosted_routers | lib_utils_oo_collect(attribute='certificate') | lib_utils_oo_select_keys_from_list(['keyfile', 'certfile', 'cafile'])|length > 0 )  # This is for when we desire a cluster signed cert @@ -55,7 +55,7 @@    when:    - openshift_hosted_router_create_certificate | bool    - openshift_hosted_router_certificate == {} -  - openshift_hosted_routers | oo_collect(attribute='certificate') | oo_select_keys_from_list(['keyfile', 'certfile', 'cafile'])|length == 0 +  - openshift_hosted_routers | lib_utils_oo_collect(attribute='certificate') | lib_utils_oo_select_keys_from_list(['keyfile', 'certfile', 'cafile'])|length == 0  - name: Create the router service account(s)    oc_serviceaccount: diff --git a/roles/openshift_hosted/tasks/storage/glusterfs.yml b/roles/openshift_hosted/tasks/storage/glusterfs.yml index 18b2edcc6..b39c44b01 100644 --- a/roles/openshift_hosted/tasks/storage/glusterfs.yml +++ b/roles/openshift_hosted/tasks/storage/glusterfs.yml @@ -17,7 +17,7 @@    until:    - "registry_pods.results.results[0]['items'] | count > 0"    # There must be as many matching pods with 'Ready' status True as there are expected replicas -  - "registry_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == openshift_hosted_registry_replicas | default(l_default_replicas) | int" +  - "registry_pods.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == openshift_hosted_registry_replicas | default(l_default_replicas) | int"    delay: 10    retries: 
"{{ (600 / 10) | int }}" diff --git a/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml b/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml index bd7181c17..77f020357 100644 --- a/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml +++ b/roles/openshift_hosted/tasks/storage/glusterfs_endpoints.yml @@ -10,7 +10,7 @@      dest: "{{ mktempHosted.stdout }}/glusterfs-registry-service.yml"  - name: Create GlusterFS registry service and endpoint -  command: "{{ openshift.common.client_binary }} apply -f {{ item }} -n {{ openshift_hosted_registry_namespace | default('default') }}" +  command: "{{ openshift_client_binary }} apply -f {{ item }} -n {{ openshift_hosted_registry_namespace | default('default') }}"    with_items:    - "{{ mktempHosted.stdout }}/glusterfs-registry-service.yml"    - "{{ mktempHosted.stdout }}/glusterfs-registry-endpoints.yml" diff --git a/roles/openshift_hosted/tasks/wait_for_pod.yml b/roles/openshift_hosted/tasks/wait_for_pod.yml index 056c79334..f4b9939cc 100644 --- a/roles/openshift_hosted/tasks/wait_for_pod.yml +++ b/roles/openshift_hosted/tasks/wait_for_pod.yml @@ -3,7 +3,7 @@    block:    - name: Ensure OpenShift pod correctly rolls out (best-effort today)      command: | -      {{ openshift.common.client_binary }} rollout status deploymentconfig {{ item.name }} \ +      {{ openshift_client_binary }} rollout status deploymentconfig {{ item.name }} \                          --namespace {{ item.namespace | default('default') }} \                          --config {{ openshift_master_config_dir }}/admin.kubeconfig      async: 600 @@ -13,7 +13,7 @@    - name: Determine the latest version of the OpenShift pod deployment      command: | -      {{ openshift.common.client_binary }} get deploymentconfig {{ item.name }} \ +      {{ openshift_client_binary }} get deploymentconfig {{ item.name }} \               --namespace {{ item.namespace }} \               --config {{ openshift_master_config_dir }}/admin.kubeconfig \   
            -o jsonpath='{ .status.latestVersion }' @@ -22,7 +22,7 @@    - name: Poll for OpenShift pod deployment success      command: | -      {{ openshift.common.client_binary }} get replicationcontroller {{ item.0.name }}-{{ item.1.stdout }} \ +      {{ openshift_client_binary }} get replicationcontroller {{ item.0.name }}-{{ item.1.stdout }} \               --namespace {{ item.0.namespace }} \               --config {{ openshift_master_config_dir }}/admin.kubeconfig \               -o jsonpath='{ .metadata.annotations.openshift\.io/deployment\.phase }' diff --git a/roles/openshift_hosted_templates/defaults/main.yml b/roles/openshift_hosted_templates/defaults/main.yml index f4fd15089..48d62c8df 100644 --- a/roles/openshift_hosted_templates/defaults/main.yml +++ b/roles/openshift_hosted_templates/defaults/main.yml @@ -1,5 +1,5 @@  --- -hosted_base: "{{ openshift.common.config_base if openshift.common.is_containerized | bool else '/usr/share/openshift' }}/hosted" +hosted_base: "{{ openshift.common.config_base if openshift_is_containerized | bool else '/usr/share/openshift' }}/hosted"  hosted_deployment_type: "{{ 'origin' if openshift_deployment_type == 'origin' else 'enterprise' }}"  content_version: "{{ openshift.common.examples_content_version }}" diff --git a/roles/openshift_hosted_templates/meta/main.yml b/roles/openshift_hosted_templates/meta/main.yml index 4027f524b..fca3485fd 100644 --- a/roles/openshift_hosted_templates/meta/main.yml +++ b/roles/openshift_hosted_templates/meta/main.yml @@ -11,4 +11,5 @@ galaxy_info:      - 7    categories:    - cloud -dependencies: [] +dependencies: +- role: lib_utils diff --git a/roles/openshift_hosted_templates/tasks/main.yml b/roles/openshift_hosted_templates/tasks/main.yml index 89b92dfcc..b2313c297 100644 --- a/roles/openshift_hosted_templates/tasks/main.yml +++ b/roles/openshift_hosted_templates/tasks/main.yml @@ -52,7 +52,7 @@  - name: Create or update hosted templates    command: > -    {{ 
openshift.common.client_binary }} {{ openshift_hosted_templates_import_command }} +    {{ openshift_client_binary }} {{ openshift_hosted_templates_import_command }}      -f {{ hosted_base }}      --config={{ openshift_hosted_templates_kubeconfig }}      -n openshift diff --git a/roles/openshift_loadbalancer/defaults/main.yml b/roles/openshift_loadbalancer/defaults/main.yml index f9c16ba40..6ffe3f11e 100644 --- a/roles/openshift_loadbalancer/defaults/main.yml +++ b/roles/openshift_loadbalancer/defaults/main.yml @@ -2,6 +2,12 @@  r_openshift_loadbalancer_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"  r_openshift_loadbalancer_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}" +openshift_router_image_default_dict: +  origin: 'openshift/origin-haproxy-router' +  openshift-enterprise: 'openshift3/ose-haproxy-router' +openshift_router_image_default: "{{ openshift_router_image_default_dict[openshift_deployment_type] }}" +openshift_router_image: "{{ openshift_router_image_default }}" +  haproxy_frontends:  - name: main    binds: diff --git a/roles/openshift_loadbalancer/meta/main.yml b/roles/openshift_loadbalancer/meta/main.yml index 72298b599..3b5b45c5f 100644 --- a/roles/openshift_loadbalancer/meta/main.yml +++ b/roles/openshift_loadbalancer/meta/main.yml @@ -10,5 +10,5 @@ galaxy_info:      versions:      - 7  dependencies: -- role: lib_os_firewall +- role: lib_utils  - role: openshift_facts diff --git a/roles/openshift_loadbalancer/tasks/main.yml b/roles/openshift_loadbalancer/tasks/main.yml index 7d23ea6c8..4a11029ab 100644 --- a/roles/openshift_loadbalancer/tasks/main.yml +++ b/roles/openshift_loadbalancer/tasks/main.yml @@ -4,33 +4,33 @@  - name: Install haproxy    package: name=haproxy state=present -  when: not openshift.common.is_containerized | bool +  when: not openshift_is_containerized | bool    register: result    until: result is succeeded  - name: Pull haproxy image    command: > -    docker pull {{ 
openshift.common.router_image }}:{{ openshift_image_tag }} -  when: openshift.common.is_containerized | bool +    docker pull {{ openshift_router_image }}:{{ openshift_image_tag }} +  when: openshift_is_containerized | bool  - name: Create config directory for haproxy    file:      path: /etc/haproxy      state: directory -  when: openshift.common.is_containerized | bool +  when: openshift_is_containerized | bool  - name: Create the systemd unit files    template:      src: "haproxy.docker.service.j2"      dest: "/etc/systemd/system/haproxy.service" -  when: openshift.common.is_containerized | bool +  when: openshift_is_containerized | bool    notify: restart haproxy  - name: Configure systemd service directory for haproxy    file:      path: /etc/systemd/system/haproxy.service.d      state: directory -  when: not openshift.common.is_containerized | bool +  when: not openshift_is_containerized | bool  # Work around ini_file create option in 2.2 which defaults to no  - name: Create limits.conf file @@ -41,7 +41,7 @@      owner: root      group: root    changed_when: false -  when: not openshift.common.is_containerized | bool +  when: not openshift_is_containerized | bool  - name: Configure the nofile limits for haproxy    ini_file: @@ -50,7 +50,7 @@      option: LimitNOFILE      value: "{{ openshift_loadbalancer_limit_nofile | default(100000) }}"    notify: restart haproxy -  when: not openshift.common.is_containerized | bool +  when: not openshift_is_containerized | bool  - name: Configure haproxy    template: diff --git a/roles/openshift_loadbalancer/templates/haproxy.cfg.j2 b/roles/openshift_loadbalancer/templates/haproxy.cfg.j2 index 24fd635ec..de5a8d7c2 100644 --- a/roles/openshift_loadbalancer/templates/haproxy.cfg.j2 +++ b/roles/openshift_loadbalancer/templates/haproxy.cfg.j2 @@ -3,7 +3,7 @@  global      maxconn     {{ openshift_loadbalancer_global_maxconn | default(20000) }}      log         /dev/log local0 info -{% if openshift.common.is_containerized | 
bool %} +{% if openshift_is_containerized | bool %}      stats socket /var/lib/haproxy/run/haproxy.sock mode 600 level admin  {% else %}      chroot      /var/lib/haproxy diff --git a/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 b/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 index 0343a7eb0..90111449c 100644 --- a/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 +++ b/roles/openshift_loadbalancer/templates/haproxy.docker.service.j2 @@ -5,7 +5,7 @@ PartOf={{ openshift_docker_service_name }}.service  [Service]  ExecStartPre=-/usr/bin/docker rm -f openshift_loadbalancer -ExecStart=/usr/bin/docker run --rm --name openshift_loadbalancer {% for frontend in openshift_loadbalancer_frontends %} {% for bind in frontend.binds %} -p {{ bind |regex_replace('^[^:]*:(\d+).*$', '\\1') }}:{{ bind |regex_replace('^[^:]*:(\d+).*$', '\\1') }} {% endfor %} {% endfor %} -v /etc/haproxy/haproxy.cfg:/etc/haproxy/haproxy.cfg:ro --entrypoint=haproxy {{ openshift.common.router_image }}:{{ openshift_image_tag }} -f /etc/haproxy/haproxy.cfg +ExecStart=/usr/bin/docker run --rm --name openshift_loadbalancer {% for frontend in openshift_loadbalancer_frontends %} {% for bind in frontend.binds %} -p {{ bind |regex_replace('^[^:]*:(\d+).*$', '\\1') }}:{{ bind |regex_replace('^[^:]*:(\d+).*$', '\\1') }} {% endfor %} {% endfor %} -v /etc/haproxy/haproxy.cfg:/etc/haproxy/haproxy.cfg:ro --entrypoint=haproxy {{ openshift_router_image }}:{{ openshift_image_tag }} -f /etc/haproxy/haproxy.cfg  ExecStartPost=/usr/bin/sleep 10  ExecStop=/usr/bin/docker stop openshift_loadbalancer  LimitNOFILE={{ openshift_loadbalancer_limit_nofile | default(100000) }} diff --git a/roles/openshift_logging/meta/main.yaml b/roles/openshift_logging/meta/main.yaml index 9c480f73a..01ed4918f 100644 --- a/roles/openshift_logging/meta/main.yaml +++ b/roles/openshift_logging/meta/main.yaml @@ -14,3 +14,4 @@ galaxy_info:  dependencies:  - role: lib_openshift  - role: 
openshift_facts +- role: lib_utils diff --git a/roles/openshift_logging/tasks/annotate_ops_projects.yaml b/roles/openshift_logging/tasks/annotate_ops_projects.yaml index 59d6098d4..4a2ee64f0 100644 --- a/roles/openshift_logging/tasks/annotate_ops_projects.yaml +++ b/roles/openshift_logging/tasks/annotate_ops_projects.yaml @@ -1,6 +1,6 @@  ---  - command: > -    {{ openshift.common.client_binary }} +    {{ openshift_client_binary }}      --config={{ openshift.common.config_base }}/master/admin.kubeconfig      get namespaces -o jsonpath={.items[*].metadata.name} {{ __default_logging_ops_projects | join(' ') }}    register: __logging_ops_projects diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml index af36d67c6..51d6d0efd 100644 --- a/roles/openshift_logging/tasks/delete_logging.yaml +++ b/roles/openshift_logging/tasks/delete_logging.yaml @@ -109,14 +109,14 @@  # remove annotations added by logging  - command: > -    {{ openshift.common.client_binary }} +    {{ openshift_client_binary }}      --config={{ openshift.common.config_base }}/master/admin.kubeconfig      get namespaces -o name {{ __default_logging_ops_projects | join(' ') }}    register: __logging_ops_projects  - name: Remove Annotation of Operations Projects    command: > -    {{ openshift.common.client_binary }} +    {{ openshift_client_binary }}      --config={{ openshift.common.config_base }}/master/admin.kubeconfig      annotate {{ project }} openshift.io/logging.ui.hostname-    with_items: "{{ __logging_ops_projects.stdout_lines }}" diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml index 082c0128f..0d7f8c056 100644 --- a/roles/openshift_logging/tasks/generate_certs.yaml +++ b/roles/openshift_logging/tasks/generate_certs.yaml @@ -17,7 +17,7 @@  - name: Generate certificates    command: > -    {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout 
}}/admin.kubeconfig ca create-signer-cert +    {{ openshift_client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-signer-cert      --key={{generated_certs_dir}}/ca.key --cert={{generated_certs_dir}}/ca.crt      --serial={{generated_certs_dir}}/ca.serial.txt --name=logging-signer-test    check_mode: no @@ -139,10 +139,10 @@  # TODO: make idempotent  - name: Generate proxy session -  set_fact: session_secret={{ 200 | oo_random_word}} +  set_fact: session_secret={{ 200 | lib_utils_oo_random_word}}    check_mode: no  # TODO: make idempotent  - name: Generate oauth client secret -  set_fact: oauth_secret={{ 64 | oo_random_word}} +  set_fact: oauth_secret={{ 64 | lib_utils_oo_random_word}}    check_mode: no diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml index bb8ebec6b..11f59652c 100644 --- a/roles/openshift_logging/tasks/install_logging.yaml +++ b/roles/openshift_logging/tasks/install_logging.yaml @@ -1,7 +1,7 @@  ---  - name: Gather OpenShift Logging Facts    openshift_logging_facts: -    oc_bin: "{{openshift.common.client_binary}}" +    oc_bin: "{{openshift_client_binary}}"      openshift_logging_namespace: "{{openshift_logging_namespace}}"  - name: Set logging project diff --git a/roles/openshift_logging/tasks/procure_server_certs.yaml b/roles/openshift_logging/tasks/procure_server_certs.yaml index 00de0ca06..bc817075d 100644 --- a/roles/openshift_logging/tasks/procure_server_certs.yaml +++ b/roles/openshift_logging/tasks/procure_server_certs.yaml @@ -27,7 +27,7 @@  - name: Creating signed server cert and key for {{ cert_info.procure_component }}    command: > -     {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-server-cert +     {{ openshift_client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-server-cert       --key={{generated_certs_dir}}/{{cert_info.procure_component}}.key 
--cert={{generated_certs_dir}}/{{cert_info.procure_component}}.crt       --hostnames={{cert_info.hostnames|quote}} --signer-cert={{generated_certs_dir}}/ca.crt --signer-key={{generated_certs_dir}}/ca.key       --signer-serial={{generated_certs_dir}}/ca.serial.txt diff --git a/roles/openshift_logging_curator/meta/main.yaml b/roles/openshift_logging_curator/meta/main.yaml index d4635aab0..9f7c6341c 100644 --- a/roles/openshift_logging_curator/meta/main.yaml +++ b/roles/openshift_logging_curator/meta/main.yaml @@ -14,3 +14,4 @@ galaxy_info:  dependencies:  - role: lib_openshift  - role: openshift_facts +- role: lib_utils diff --git a/roles/openshift_logging_curator/tasks/main.yaml b/roles/openshift_logging_curator/tasks/main.yaml index e7ef5ff22..524e239b7 100644 --- a/roles/openshift_logging_curator/tasks/main.yaml +++ b/roles/openshift_logging_curator/tasks/main.yaml @@ -2,7 +2,7 @@  - name: Set default image variables based on deployment_type    include_vars: "{{ var_file_name }}"    with_first_found: -    - "{{ openshift_deployment_type | default(deployment_type) }}.yml" +    - "{{ openshift_deployment_type }}.yml"      - "default_images.yml"    loop_control:      loop_var: var_file_name diff --git a/roles/openshift_logging_elasticsearch/meta/main.yaml b/roles/openshift_logging_elasticsearch/meta/main.yaml index 6a9a6539c..e93d6b73e 100644 --- a/roles/openshift_logging_elasticsearch/meta/main.yaml +++ b/roles/openshift_logging_elasticsearch/meta/main.yaml @@ -14,3 +14,4 @@ galaxy_info:  dependencies:  - role: lib_openshift  - role: openshift_facts +- role: lib_utils diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml index 5fe683ae5..6ddeb122e 100644 --- a/roles/openshift_logging_elasticsearch/tasks/main.yaml +++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml @@ -15,10 +15,10 @@      elasticsearch_name: "{{ 'logging-elasticsearch' ~ ( (openshift_logging_elasticsearch_ops_deployment | 
default(false) | bool) | ternary('-ops', '')) }}"      es_component: "{{ 'es' ~ ( (openshift_logging_elasticsearch_ops_deployment | default(false) | bool) | ternary('-ops', '') ) }}" -- name: Set default image variables based on deployment_type +- name: Set default image variables based on openshift_deployment_type    include_vars: "{{ var_file_name }}"    with_first_found: -    - "{{ openshift_deployment_type | default(deployment_type) }}.yml" +    - "{{ openshift_deployment_type }}.yml"      - "default_images.yml"    loop_control:      loop_var: var_file_name @@ -111,7 +111,7 @@  - name: Create logging-metrics-reader-role    command: > -    {{ openshift.common.client_binary }} +    {{ openshift_client_binary }}      --config={{ openshift.common.config_base }}/master/admin.kubeconfig      -n "{{ openshift_logging_elasticsearch_namespace }}"      create -f "{{mktemp.stdout}}/templates/logging-metrics-role.yml" @@ -352,7 +352,7 @@          delete_after: true  - set_fact: -    es_deploy_name: "logging-{{ es_component }}-{{ openshift_logging_elasticsearch_deployment_type }}-{{ 8 | oo_random_word('abcdefghijklmnopqrstuvwxyz0123456789') }}" +    es_deploy_name: "logging-{{ es_component }}-{{ openshift_logging_elasticsearch_deployment_type }}-{{ 8 | lib_utils_oo_random_word('abcdefghijklmnopqrstuvwxyz0123456789') }}"    when: openshift_logging_elasticsearch_deployment_name == ""  - set_fact: diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2 index cf6ee36bb..4b189f255 100644 --- a/roles/openshift_logging_elasticsearch/templates/es.j2 +++ b/roles/openshift_logging_elasticsearch/templates/es.j2 @@ -50,7 +50,7 @@ spec:             - -provider=openshift             - -client-id={{openshift_logging_elasticsearch_prometheus_sa}}             - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token -           - -cookie-secret={{ 16 | oo_random_word | b64encode }} +           - 
-cookie-secret={{ 16 | lib_utils_oo_random_word | b64encode }}             - -upstream=https://localhost:9200             - '-openshift-sar={"namespace": "{{ openshift_logging_elasticsearch_namespace}}", "verb": "view", "resource": "prometheus", "group": "metrics.openshift.io"}'             - '-openshift-delegate-urls={"/": {"resource": "prometheus", "verb": "view", "group": "metrics.openshift.io", "namespace": "{{ openshift_logging_elasticsearch_namespace}}"}}' diff --git a/roles/openshift_node_facts/meta/main.yml b/roles/openshift_logging_eventrouter/meta/main.yaml index 59bf680ce..711bb8f22 100644 --- a/roles/openshift_node_facts/meta/main.yml +++ b/roles/openshift_logging_eventrouter/meta/main.yaml @@ -1,10 +1,10 @@  ---  galaxy_info: -  author: Andrew Butcher -  description: OpenShift Node Facts +  author: OpenShift Red Hat +  description: OpenShift Aggregated Logging Eventrouter    company: Red Hat, Inc.    license: Apache License, Version 2.0 -  min_ansible_version: 1.9 +  min_ansible_version: 2.2    platforms:    - name: EL      versions: @@ -12,4 +12,6 @@ galaxy_info:    categories:    - cloud  dependencies: +- role: lib_openshift  - role: openshift_facts +- role: lib_utils diff --git a/roles/openshift_logging_eventrouter/tasks/main.yaml b/roles/openshift_logging_eventrouter/tasks/main.yaml index 96b181d61..31780a343 100644 --- a/roles/openshift_logging_eventrouter/tasks/main.yaml +++ b/roles/openshift_logging_eventrouter/tasks/main.yaml @@ -1,8 +1,8 @@  --- -- name: Set default image variables based on deployment_type +- name: Set default image variables based on openshift_deployment_type    include_vars: "{{ var_file_name }}"    with_first_found: -    - "{{ openshift_deployment_type | default(deployment_type) }}.yml" +    - "{{ openshift_deployment_type }}.yml"      - "default_images.yml"    loop_control:      loop_var: var_file_name diff --git a/roles/openshift_logging_fluentd/meta/main.yaml b/roles/openshift_logging_fluentd/meta/main.yaml index 
89c98204f..62f076780 100644 --- a/roles/openshift_logging_fluentd/meta/main.yaml +++ b/roles/openshift_logging_fluentd/meta/main.yaml @@ -14,3 +14,4 @@ galaxy_info:  dependencies:  - role: lib_openshift  - role: openshift_facts +- role: lib_utils diff --git a/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml b/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml index 12b4f5bfd..1cef6c25e 100644 --- a/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml +++ b/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml @@ -4,7 +4,7 @@      name: "{{ node }}"      kind: node      state: add -    labels: "{{ openshift_logging_fluentd_nodeselector | oo_dict_to_list_of_dict }}" +    labels: "{{ openshift_logging_fluentd_nodeselector | lib_utils_oo_dict_to_list_of_dict }}"  # wait half a second between labels  - local_action: command sleep {{ openshift_logging_fluentd_label_delay | default('.5') }} diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml index 87eedfb4b..08d7561ac 100644 --- a/roles/openshift_logging_fluentd/tasks/main.yaml +++ b/roles/openshift_logging_fluentd/tasks/main.yaml @@ -34,10 +34,10 @@      msg: WARNING Use of openshift_logging_mux_client_mode=minimal is not recommended due to current scaling issues    when: openshift_logging_mux_client_mode is defined and openshift_logging_mux_client_mode == 'minimal' -- name: Set default image variables based on deployment_type +- name: Set default image variables based on openshift_deployment_type    include_vars: "{{ var_file_name }}"    with_first_found: -    - "{{ openshift_deployment_type | default(deployment_type) }}.yml" +    - "{{ openshift_deployment_type }}.yml"      - "default_images.yml"    loop_control:      loop_var: var_file_name diff --git a/roles/openshift_logging_kibana/meta/main.yaml b/roles/openshift_logging_kibana/meta/main.yaml index d97586a37..d9d76dfe0 100644 --- a/roles/openshift_logging_kibana/meta/main.yaml +++ 
b/roles/openshift_logging_kibana/meta/main.yaml @@ -14,3 +14,4 @@ galaxy_info:  dependencies:  - role: lib_openshift  - role: openshift_facts +- role: lib_utils diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml index 77bf8042a..3c3bd902e 100644 --- a/roles/openshift_logging_kibana/tasks/main.yaml +++ b/roles/openshift_logging_kibana/tasks/main.yaml @@ -1,9 +1,9 @@  ---  # fail is we don't have an endpoint for ES to connect to? -- name: Set default image variables based on deployment_type +- name: Set default image variables based on openshift_deployment_type    include_vars: "{{ var_file_name }}"    with_first_found: -    - "{{ openshift_deployment_type | default(deployment_type) }}.yml" +    - "{{ openshift_deployment_type }}.yml"      - "default_images.yml"    loop_control:      loop_var: var_file_name @@ -69,7 +69,7 @@  # gen session_secret if necessary  - name: Generate session secret    copy: -    content: "{{ 200 | oo_random_word }}" +    content: "{{ 200 | lib_utils_oo_random_word }}"      dest: "{{ generated_certs_dir }}/session_secret"    when:      - not session_secret_file.stat.exists @@ -77,7 +77,7 @@  # gen oauth_secret if necessary  - name: Generate oauth secret    copy: -    content: "{{ 64 | oo_random_word }}" +    content: "{{ 64 | lib_utils_oo_random_word }}"      dest: "{{ generated_certs_dir }}/oauth_secret"    when:      - not oauth_secret_file.stat.exists diff --git a/roles/openshift_logging_mux/meta/main.yaml b/roles/openshift_logging_mux/meta/main.yaml index f271d8d7d..969752f15 100644 --- a/roles/openshift_logging_mux/meta/main.yaml +++ b/roles/openshift_logging_mux/meta/main.yaml @@ -14,3 +14,4 @@ galaxy_info:  dependencies:  - role: lib_openshift  - role: openshift_facts +- role: lib_utils diff --git a/roles/openshift_logging_mux/tasks/main.yaml b/roles/openshift_logging_mux/tasks/main.yaml index 68948bce2..59a6301d7 100644 --- a/roles/openshift_logging_mux/tasks/main.yaml +++ 
b/roles/openshift_logging_mux/tasks/main.yaml @@ -7,10 +7,10 @@      msg: Operations logs destination is required    when: not openshift_logging_mux_ops_host or openshift_logging_mux_ops_host == '' -- name: Set default image variables based on deployment_type +- name: Set default image variables based on openshift_deployment_type    include_vars: "{{ var_file_name }}"    with_first_found: -    - "{{ openshift_deployment_type | default(deployment_type) }}.yml" +    - "{{ openshift_deployment_type }}.yml"      - "default_images.yml"    loop_control:      loop_var: var_file_name diff --git a/roles/openshift_manage_node/meta/main.yml b/roles/openshift_manage_node/meta/main.yml index d90cd28cf..a09808a39 100644 --- a/roles/openshift_manage_node/meta/main.yml +++ b/roles/openshift_manage_node/meta/main.yml @@ -13,3 +13,4 @@ galaxy_info:    - cloud  dependencies:  - role: lib_openshift +- role: lib_utils diff --git a/roles/openshift_manage_node/tasks/main.yml b/roles/openshift_manage_node/tasks/main.yml index a15f336e4..9251d380b 100644 --- a/roles/openshift_manage_node/tasks/main.yml +++ b/roles/openshift_manage_node/tasks/main.yml @@ -18,7 +18,7 @@    retries: 120    delay: 1    changed_when: false -  when: openshift.common.is_containerized | bool +  when: openshift_is_containerized | bool    delegate_to: "{{ openshift_master_host }}"    run_once: true @@ -50,10 +50,9 @@      name: "{{ openshift.node.nodename }}"      kind: node      state: add -    labels: "{{ openshift.node.labels | oo_dict_to_list_of_dict }}" +    labels: "{{ openshift_node_labels | lib_utils_oo_dict_to_list_of_dict }}"      namespace: default    when:      - "'nodename' in openshift.node" -    - "'labels' in openshift.node" -    - openshift.node.labels != {} +    - openshift_node_labels | default({}) != {}    delegate_to: "{{ openshift_master_host }}" diff --git a/roles/openshift_manageiq/meta/main.yml b/roles/openshift_manageiq/meta/main.yml index 6c96a91bf..5c9481430 100644 --- 
a/roles/openshift_manageiq/meta/main.yml +++ b/roles/openshift_manageiq/meta/main.yml @@ -13,3 +13,4 @@ galaxy_info:    - cloud  dependencies:  - role: lib_openshift +- role: lib_utils diff --git a/roles/openshift_management/tasks/add_container_provider.yml b/roles/openshift_management/tasks/add_container_provider.yml index 24b2ce6ac..ca381b105 100644 --- a/roles/openshift_management/tasks/add_container_provider.yml +++ b/roles/openshift_management/tasks/add_container_provider.yml @@ -27,7 +27,7 @@  - name: Ensure the management SA bearer token is identified    set_fact: -    management_token: "{{ sa.results | oo_filter_sa_secrets }}" +    management_token: "{{ sa.results | lib_utils_oo_filter_sa_secrets }}"  - name: Ensure the SA bearer token value is read    oc_secret: diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml index efd119299..5d292ffd0 100644 --- a/roles/openshift_master/defaults/main.yml +++ b/roles/openshift_master/defaults/main.yml @@ -7,6 +7,12 @@ openshift_master_debug_level: "{{ debug_level | default(2) }}"  r_openshift_master_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"  r_openshift_master_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}" +osm_image_default_dict: +  origin: 'openshift/origin' +  openshift-enterprise: 'openshift3/ose' +osm_image_default: "{{ osm_image_default_dict[openshift_deployment_type] }}" +osm_image: "{{ osm_image_default }}" +  system_images_registry_dict:    openshift-enterprise: "registry.access.redhat.com"    origin: "docker.io" diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml index bf0cbbf18..3460efec9 100644 --- a/roles/openshift_master/meta/main.yml +++ b/roles/openshift_master/meta/main.yml @@ -14,5 +14,4 @@ galaxy_info:  dependencies:  - role: lib_openshift  - role: lib_utils -- role: lib_os_firewall  - role: openshift_facts diff --git a/roles/openshift_master/tasks/main.yml 
b/roles/openshift_master/tasks/main.yml index 7bfc870d5..eea1401b8 100644 --- a/roles/openshift_master/tasks/main.yml +++ b/roles/openshift_master/tasks/main.yml @@ -16,10 +16,10 @@  - name: Install Master package    package: -    name: "{{ openshift_service_type }}-master{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}" +    name: "{{ openshift_service_type }}-master{{ openshift_pkg_version | default('') | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}"      state: present    when: -  - not openshift.common.is_containerized | bool +  - not openshift_is_containerized | bool    register: result    until: result is succeeded @@ -31,12 +31,12 @@      owner: root      group: root    when: -  - openshift.common.is_containerized | bool +  - openshift_is_containerized | bool  - name: Reload systemd units    command: systemctl daemon-reload    when: -  - openshift.common.is_containerized | bool +  - openshift_is_containerized | bool  - name: Re-gather package dependent master facts    openshift_facts: @@ -48,7 +48,7 @@  - name: Create the policy file if it does not already exist    command: > -    {{ openshift.common.client_binary }} adm create-bootstrap-policy-file +    {{ openshift_client_binary }} adm create-bootstrap-policy-file        --filename={{ openshift_master_policy }}    args:      creates: "{{ openshift_master_policy }}" @@ -69,7 +69,7 @@    package: name=httpd-tools state=present    when:    - item.kind == 'HTPasswdPasswordIdentityProvider' -  - not openshift.common.is_atomic | bool +  - not openshift_is_atomic | bool    with_items: "{{ openshift.master.identity_providers }}"    register: result    until: result is succeeded @@ -164,7 +164,7 @@  - name: Install Master system container    include_tasks: system_container.yml    when: -  - openshift.common.is_containerized | bool +  - openshift_is_containerized | bool    - l_is_master_system_container | bool  - name: Create session secrets file diff --git 
a/roles/openshift_master/tasks/registry_auth.yml b/roles/openshift_master/tasks/registry_auth.yml index 8b342a5b4..911a9bd3d 100644 --- a/roles/openshift_master/tasks/registry_auth.yml +++ b/roles/openshift_master/tasks/registry_auth.yml @@ -43,7 +43,7 @@    set_fact:      l_bind_docker_reg_auth: True    when: -  - openshift.common.is_containerized | bool +  - openshift_is_containerized | bool    - oreg_auth_user is defined    - >        (master_oreg_auth_credentials_stat.stat.exists diff --git a/roles/openshift_master/tasks/set_loopback_context.yml b/roles/openshift_master/tasks/set_loopback_context.yml index 487fefb63..7e013a699 100644 --- a/roles/openshift_master/tasks/set_loopback_context.yml +++ b/roles/openshift_master/tasks/set_loopback_context.yml @@ -1,13 +1,13 @@  ---  - name: Test local loopback context    command: > -    {{ openshift.common.client_binary }} config view +    {{ openshift_client_binary }} config view      --config={{ openshift_master_loopback_config }}    changed_when: false    register: l_loopback_config  - command: > -    {{ openshift.common.client_binary }} config set-cluster +    {{ openshift_client_binary }} config set-cluster      --certificate-authority={{ openshift_master_config_dir }}/ca.crt      --embed-certs=true --server={{ openshift.master.loopback_api_url }}      {{ openshift.master.loopback_cluster_name }} @@ -17,7 +17,7 @@    register: set_loopback_cluster  - command: > -    {{ openshift.common.client_binary }} config set-context +    {{ openshift_client_binary }} config set-context      --cluster={{ openshift.master.loopback_cluster_name }}      --namespace=default --user={{ openshift.master.loopback_user }}      {{ openshift.master.loopback_context_name }} @@ -27,7 +27,7 @@    register: l_set_loopback_context  - command: > -    {{ openshift.common.client_binary }} config use-context {{ openshift.master.loopback_context_name }} +    {{ openshift_client_binary }} config use-context {{ openshift.master.loopback_context_name 
}}      --config={{ openshift_master_loopback_config }}    when:    - l_set_loopback_context is changed diff --git a/roles/openshift_master/tasks/system_container.yml b/roles/openshift_master/tasks/system_container.yml index f6c5ce0dd..dcbf7fd9f 100644 --- a/roles/openshift_master/tasks/system_container.yml +++ b/roles/openshift_master/tasks/system_container.yml @@ -2,7 +2,7 @@  - name: Pre-pull master system container image    command: > -    atomic pull --storage=ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }} +    atomic pull --storage=ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ osm_image }}:{{ openshift_image_tag }}    register: l_pull_result    changed_when: "'Pulling layer' in l_pull_result.stdout" @@ -14,7 +14,7 @@  - name: Install or Update HA api master system container    oc_atomic_container:      name: "{{ openshift_service_type }}-master-api" -    image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}" +    image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ osm_image }}:{{ openshift_image_tag }}"      state: latest      values:      - COMMAND=api @@ -22,7 +22,7 @@  - name: Install or Update HA controller master system container    oc_atomic_container:      name: "{{ openshift_service_type }}-master-controllers" -    image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}" +    image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ osm_image }}:{{ openshift_image_tag }}"      state: latest      values:      - COMMAND=controllers diff --git a/roles/openshift_master/tasks/systemd_units.yml 
b/roles/openshift_master/tasks/systemd_units.yml index 1c9ecafaa..870ab7c57 100644 --- a/roles/openshift_master/tasks/systemd_units.yml +++ b/roles/openshift_master/tasks/systemd_units.yml @@ -7,7 +7,7 @@      containerized_svc_dir: "/etc/systemd/system"      ha_svc_template_path: "docker-cluster"    when: -  - openshift.common.is_containerized | bool +  - openshift_is_containerized | bool  - include_tasks: registry_auth.yml @@ -30,11 +30,11 @@  # This is the image used for both HA and non-HA clusters:  - name: Pre-pull master image    command: > -    docker pull {{ openshift.master.master_image }}:{{ openshift_image_tag }} +    docker pull {{ osm_image }}:{{ openshift_image_tag }}    register: l_pull_result    changed_when: "'Downloaded newer image' in l_pull_result.stdout"    when: -  - openshift.common.is_containerized | bool +  - openshift_is_containerized | bool    - not l_is_master_system_container | bool  - name: Create the ha systemd unit files diff --git a/roles/openshift_master/tasks/upgrade.yml b/roles/openshift_master/tasks/upgrade.yml index f84cf2f6e..f143673cf 100644 --- a/roles/openshift_master/tasks/upgrade.yml +++ b/roles/openshift_master/tasks/upgrade.yml @@ -1,6 +1,6 @@  ---  - include_tasks: upgrade/rpm_upgrade.yml -  when: not openshift.common.is_containerized | bool +  when: not openshift_is_containerized | bool  - include_tasks: upgrade/upgrade_scheduler.yml diff --git a/roles/openshift_master/templates/atomic-openshift-master.j2 b/roles/openshift_master/templates/atomic-openshift-master.j2 index 3f7a528a9..4c68155ea 100644 --- a/roles/openshift_master/templates/atomic-openshift-master.j2 +++ b/roles/openshift_master/templates/atomic-openshift-master.j2 @@ -6,7 +6,7 @@ CONFIG_FILE={{ openshift_master_config_file }}  {% elif openshift_push_via_dns | default(false) %}  OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000  {% endif %} -{% if openshift.common.is_containerized | bool %} +{% if openshift_is_containerized | bool %}  
IMAGE_VERSION={{ openshift_image_tag }}  {% endif %} diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 index 5e46d9121..a56c0340c 100644 --- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 +++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-api.service.j2 @@ -21,7 +21,7 @@ ExecStart=/usr/bin/docker run --rm --privileged --net=host \    {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \    -v /etc/pki:/etc/pki:ro \    {% if l_bind_docker_reg_auth | default(False) %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\ -  {{ openshift.master.master_image }}:${IMAGE_VERSION} start master api \ +  {{ osm_image }}:${IMAGE_VERSION} start master api \    --config=${CONFIG_FILE} $OPTIONS  ExecStartPost=/usr/bin/sleep 10  ExecStop=/usr/bin/docker stop {{ openshift_service_type }}-master-api diff --git a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 index 899575f1a..79171d511 100644 --- a/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 +++ b/roles/openshift_master/templates/docker-cluster/atomic-openshift-master-controllers.service.j2 @@ -20,7 +20,7 @@ ExecStart=/usr/bin/docker run --rm --privileged --net=host \    {% if openshift_cloudprovider_kind | default('') != '' -%} -v {{ openshift.common.config_base }}/cloudprovider:{{ openshift.common.config_base}}/cloudprovider {% endif -%} \    -v /etc/pki:/etc/pki:ro \    {% if l_bind_docker_reg_auth | default(False) %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\ -  {{ 
openshift.master.master_image }}:${IMAGE_VERSION} start master controllers \ +  {{ osm_image }}:${IMAGE_VERSION} start master controllers \    --config=${CONFIG_FILE} $OPTIONS  ExecStartPost=/usr/bin/sleep 10  ExecStop=/usr/bin/docker stop {{ openshift_service_type }}-master-controllers diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2 index f1a76e5f5..c224ad714 100644 --- a/roles/openshift_master/templates/master.yaml.v1.j2 +++ b/roles/openshift_master/templates/master.yaml.v1.j2 @@ -1,6 +1,6 @@  admissionConfig:  {% if 'admission_plugin_config' in openshift.master %} -  pluginConfig:{{ openshift.master.admission_plugin_config | to_padded_yaml(level=2) }} +  pluginConfig:{{ openshift.master.admission_plugin_config | lib_utils_to_padded_yaml(level=2) }}  {% endif %}  apiLevels:  - v1 @@ -16,13 +16,13 @@ assetConfig:    metricsPublicURL: {{ openshift_hosted_metrics_deploy_url }}  {% endif %}  {% if 'extension_scripts' in openshift.master %} -  extensionScripts: {{ openshift.master.extension_scripts | to_padded_yaml(1, 2) }} +  extensionScripts: {{ openshift.master.extension_scripts | lib_utils_to_padded_yaml(1, 2) }}  {% endif %}  {% if 'extension_stylesheets' in openshift.master %} -  extensionStylesheets: {{ openshift.master.extension_stylesheets | to_padded_yaml(1, 2) }} +  extensionStylesheets: {{ openshift.master.extension_stylesheets | lib_utils_to_padded_yaml(1, 2) }}  {% endif %}  {% if 'extensions' in openshift.master %} -  extensions: {{ openshift.master.extensions | to_padded_yaml(1, 2) }} +  extensions: {{ openshift.master.extensions | lib_utils_to_padded_yaml(1, 2) }}  {% endif %}    servingInfo:      bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.console_port }} @@ -42,7 +42,7 @@ assetConfig:  {% endfor %}  {% endif %}  {% if openshift.master.audit_config | default(none) is not none %} -auditConfig:{{ openshift.master.audit_config | to_padded_yaml(level=1) }} 
+auditConfig:{{ openshift.master.audit_config | lib_utils_to_padded_yaml(level=1) }}  {% endif %}  controllerConfig:    election: @@ -85,7 +85,7 @@ imageConfig:    format: {{ openshift.master.registry_url }}    latest: {{ openshift_master_image_config_latest }}  {% if 'image_policy_config' in openshift.master %} -imagePolicyConfig:{{ openshift.master.image_policy_config | to_padded_yaml(level=1) }} +imagePolicyConfig:{{ openshift.master.image_policy_config | lib_utils_to_padded_yaml(level=1) }}  {% endif %}  kind: MasterConfig  kubeletClientInfo: @@ -96,21 +96,21 @@ kubeletClientInfo:    port: 10250  {% if openshift.master.embedded_kube | bool %}  kubernetesMasterConfig: -  apiServerArguments: {{ openshift.master.api_server_args | default(None) | to_padded_yaml( level=2 ) }} +  apiServerArguments: {{ openshift.master.api_server_args | default(None) | lib_utils_to_padded_yaml( level=2 ) }}  {% if r_openshift_master_etcd3_storage or ( r_openshift_master_clean_install and openshift.common.version_gte_3_6 ) %}      storage-backend:      - etcd3      storage-media-type:      - application/vnd.kubernetes.protobuf  {% endif %} -  controllerArguments: {{ openshift.master.controller_args | default(None) | to_padded_yaml( level=2 ) }} +  controllerArguments: {{ openshift.master.controller_args | default(None) | lib_utils_to_padded_yaml( level=2 ) }}    masterCount: {{ openshift.master.master_count }}    masterIP: {{ openshift.common.ip }}    podEvictionTimeout: {{ openshift.master.pod_eviction_timeout | default("") }}    proxyClientInfo:      certFile: master.proxy-client.crt      keyFile: master.proxy-client.key -  schedulerArguments: {{ openshift_master_scheduler_args | default(None) | to_padded_yaml( level=3 ) }} +  schedulerArguments: {{ openshift_master_scheduler_args | default(None) | lib_utils_to_padded_yaml( level=3 ) }}    schedulerConfigFile: {{ openshift_master_scheduler_conf }}    servicesNodePortRange: "{{ openshift_node_port_range | default("") }}"    
servicesSubnet: {{ openshift.common.portal_net }} @@ -144,7 +144,7 @@ networkConfig:  {% endif %}  # serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet    serviceNetworkCIDR: {{ openshift.common.portal_net }} -  externalIPNetworkCIDRs: {{ openshift_master_external_ip_network_cidrs | default(["0.0.0.0/0"]) | to_padded_yaml(1,2) }} +  externalIPNetworkCIDRs: {{ openshift_master_external_ip_network_cidrs | default(["0.0.0.0/0"]) | lib_utils_to_padded_yaml(1,2) }}  {% if openshift_master_ingress_ip_network_cidr is defined %}    ingressIPNetworkCIDR: {{ openshift_master_ingress_ip_network_cidr }}  {% endif %} @@ -153,7 +153,7 @@ oauthConfig:    alwaysShowProviderSelection: {{ openshift.master.oauth_always_show_provider_selection }}  {% endif %}  {% if 'oauth_templates' in openshift.master %} -  templates:{{ openshift.master.oauth_templates | to_padded_yaml(level=2) }} +  templates:{{ openshift.master.oauth_templates | lib_utils_to_padded_yaml(level=2) }}  {% endif %}    assetPublicURL: {{ openshift.master.public_console_url }}/    grantConfig: diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 index cc21b37af..bff32b2e3 100644 --- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 +++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 @@ -6,7 +6,7 @@ CONFIG_FILE={{ openshift_master_config_file }}  {% elif openshift_push_via_dns | default(false) %}  OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000  {% endif %} -{% if openshift.common.is_containerized | bool %} +{% if openshift_is_containerized | bool %}  IMAGE_VERSION={{ openshift_image_tag }}  {% endif %} diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 index 
493fc510e..b8a519baa 100644 --- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 +++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 @@ -6,7 +6,7 @@ CONFIG_FILE={{ openshift_master_config_file }}  {% elif openshift_push_via_dns | default(false) %}  OPENSHIFT_DEFAULT_REGISTRY=docker-registry.default.svc:5000  {% endif %} -{% if openshift.common.is_containerized | bool %} +{% if openshift_is_containerized | bool %}  IMAGE_VERSION={{ openshift_image_tag }}  {% endif %} diff --git a/roles/openshift_master_certificates/meta/main.yml b/roles/openshift_master_certificates/meta/main.yml index 300b2cbff..e7d9f5bba 100644 --- a/roles/openshift_master_certificates/meta/main.yml +++ b/roles/openshift_master_certificates/meta/main.yml @@ -12,4 +12,5 @@ galaxy_info:    categories:    - cloud    - system -dependencies: [] +dependencies: +- role: lib_utils diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml index ec1fbb1ee..00cabe574 100644 --- a/roles/openshift_master_certificates/tasks/main.yml +++ b/roles/openshift_master_certificates/tasks/main.yml @@ -27,7 +27,7 @@      master_certs_missing: "{{ true if openshift_certificates_redeploy | default(false) | bool                                else (False in (g_master_cert_stat_result.results                                                | default({}) -                                              | oo_collect(attribute='stat.exists') +                                              | lib_utils_oo_collect(attribute='stat.exists')                                                | list)) }}"  - name: Ensure the generated_configs directory present @@ -47,11 +47,11 @@  - name: Create the master server certificate    command: > -    {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm ca create-server-cert -    {% for named_ca_certificate in openshift.master.named_certificates | 
default([]) | oo_collect('cafile') %} +    {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm ca create-server-cert +    {% for named_ca_certificate in openshift.master.named_certificates | default([]) | lib_utils_oo_collect('cafile') %}      --certificate-authority {{ named_ca_certificate }}      {% endfor %} -    {% for legacy_ca_certificate in g_master_legacy_ca_result.files | default([]) | oo_collect('path') %} +    {% for legacy_ca_certificate in g_master_legacy_ca_result.files | default([]) | lib_utils_oo_collect('path') %}      --certificate-authority {{ legacy_ca_certificate }}      {% endfor %}      --hostnames={{ hostvars[item].openshift.common.all_hostnames | join(',') }} @@ -64,16 +64,16 @@      --overwrite=false    when: item != openshift_ca_host    with_items: "{{ hostvars -                  | oo_select_keys(groups['oo_masters_to_config']) -                  | oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True}) }}" +                  | lib_utils_oo_select_keys(groups['oo_masters_to_config']) +                  | lib_utils_oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True}) }}"    delegate_to: "{{ openshift_ca_host }}"    run_once: true  - name: Generate the loopback master client config    command: > -    {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config +    {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm create-api-client-config        --certificate-authority={{ openshift_ca_cert }} -      {% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %} +      {% for named_ca_certificate in openshift.master.named_certificates | default([]) | lib_utils_oo_collect('cafile') %}        --certificate-authority {{ named_ca_certificate }}        {% endfor %}        --client-dir={{ openshift_generated_configs_dir }}/master-{{ hostvars[item].openshift.common.hostname }} @@ -89,8 
+89,8 @@    args:      creates: "{{ openshift_generated_configs_dir }}/master-{{ hostvars[item].openshift.common.hostname }}/openshift-master.kubeconfig"    with_items: "{{ hostvars -                  | oo_select_keys(groups['oo_masters_to_config']) -                  | oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True}) }}" +                  | lib_utils_oo_select_keys(groups['oo_masters_to_config']) +                  | lib_utils_oo_collect(attribute='inventory_hostname', filters={'master_certs_missing':True}) }}"    when: item != openshift_ca_host    delegate_to: "{{ openshift_ca_host }}"    run_once: true diff --git a/roles/openshift_master_facts/filter_plugins/oo_filters.py b/roles/openshift_master_facts/filter_plugins/oo_filters.py deleted file mode 120000 index 6f9bc47c1..000000000 --- a/roles/openshift_master_facts/filter_plugins/oo_filters.py +++ /dev/null @@ -1 +0,0 @@ -../../../filter_plugins/oo_filters.py
\ No newline at end of file diff --git a/roles/openshift_master_facts/meta/main.yml b/roles/openshift_master_facts/meta/main.yml index 9dbf719f8..0ab2311d3 100644 --- a/roles/openshift_master_facts/meta/main.yml +++ b/roles/openshift_master_facts/meta/main.yml @@ -13,3 +13,4 @@ galaxy_info:    - cloud  dependencies:  - role: openshift_facts +- role: lib_utils diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml index 418dcba67..ad9a21c96 100644 --- a/roles/openshift_master_facts/tasks/main.yml +++ b/roles/openshift_master_facts/tasks/main.yml @@ -15,7 +15,7 @@    set_fact:      g_metrics_hostname: "{{ openshift_hosted_metrics_public_url                          | default('hawkular-metrics.' ~ openshift_master_default_subdomain) -                        | oo_hostname_from_url }}" +                        | lib_utils_oo_hostname_from_url }}"  - set_fact:      openshift_hosted_metrics_deploy_url: "https://{{ g_metrics_hostname }}/hawkular/metrics" @@ -72,7 +72,6 @@        controller_args: "{{ osm_controller_args | default(None) }}"        disabled_features: "{{ osm_disabled_features | default(None) }}"        master_count: "{{ openshift_master_count | default(None) }}" -      master_image: "{{ osm_image | default(None) }}"        admission_plugin_config: "{{openshift_master_admission_plugin_config }}"        kube_admission_plugin_config: "{{openshift_master_kube_admission_plugin_config | default(None) }}"  # deprecated, merged with admission_plugin_config        oauth_template: "{{ openshift_master_oauth_template | default(None) }}"  # deprecated in origin 1.2 / OSE 3.2 diff --git a/roles/openshift_metrics/meta/main.yaml b/roles/openshift_metrics/meta/main.yaml index 50214135c..675ec112f 100644 --- a/roles/openshift_metrics/meta/main.yaml +++ b/roles/openshift_metrics/meta/main.yaml @@ -15,5 +15,6 @@ galaxy_info:    categories:    - openshift  dependencies: -- { role: lib_openshift } -- { role: openshift_facts } +- 
role: lib_openshift +- role: lib_utils +- role: openshift_facts diff --git a/roles/openshift_metrics/tasks/generate_certificates.yaml b/roles/openshift_metrics/tasks/generate_certificates.yaml index bb842d710..b71e35263 100644 --- a/roles/openshift_metrics/tasks/generate_certificates.yaml +++ b/roles/openshift_metrics/tasks/generate_certificates.yaml @@ -1,7 +1,7 @@  ---  - name: generate ca certificate chain    command: > -    {{ openshift.common.client_binary }} adm ca create-signer-cert +    {{ openshift_client_binary }} adm ca create-signer-cert      --config={{ mktemp.stdout }}/admin.kubeconfig      --key='{{ mktemp.stdout }}/ca.key'      --cert='{{ mktemp.stdout }}/ca.crt' diff --git a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml index 0fd19c9f8..9395fceca 100644 --- a/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml +++ b/roles/openshift_metrics/tasks/generate_hawkular_certificates.yaml @@ -14,7 +14,7 @@    changed_when: no  - name: generate password for hawkular metrics -  local_action: copy dest="{{ local_tmp.stdout }}/{{ item }}.pwd" content="{{ 15 | oo_random_word }}" +  local_action: copy dest="{{ local_tmp.stdout }}/{{ item }}.pwd" content="{{ 15 | lib_utils_oo_random_word }}"    with_items:    - hawkular-metrics    become: false diff --git a/roles/openshift_metrics/tasks/install_cassandra.yaml b/roles/openshift_metrics/tasks/install_cassandra.yaml index 48584bd64..9026cc897 100644 --- a/roles/openshift_metrics/tasks/install_cassandra.yaml +++ b/roles/openshift_metrics/tasks/install_cassandra.yaml @@ -1,6 +1,6 @@  ---  - shell: > -    {{ openshift.common.client_binary }} -n {{ openshift_metrics_project | quote }} +    {{ openshift_client_binary }} -n {{ openshift_metrics_project | quote }}      --config={{ mktemp.stdout }}/admin.kubeconfig      get rc hawkular-cassandra-{{node}} -o jsonpath='{.spec.replicas}' || echo 0    vars: diff --git 
a/roles/openshift_metrics/tasks/install_hawkular.yaml b/roles/openshift_metrics/tasks/install_hawkular.yaml index a4ffa1890..f45e7a042 100644 --- a/roles/openshift_metrics/tasks/install_hawkular.yaml +++ b/roles/openshift_metrics/tasks/install_hawkular.yaml @@ -1,6 +1,6 @@  ---  - command: > -    {{ openshift.common.client_binary }} -n {{ openshift_metrics_project | quote }} +    {{ openshift_client_binary }} -n {{ openshift_metrics_project | quote }}      --config={{ mktemp.stdout }}/admin.kubeconfig      get rc hawkular-metrics -o jsonpath='{.spec.replicas}'    register: hawkular_metrics_replica_count diff --git a/roles/openshift_metrics/tasks/install_heapster.yaml b/roles/openshift_metrics/tasks/install_heapster.yaml index a33b28ba7..73e7454f0 100644 --- a/roles/openshift_metrics/tasks/install_heapster.yaml +++ b/roles/openshift_metrics/tasks/install_heapster.yaml @@ -1,6 +1,6 @@  ---  - command: > -    {{ openshift.common.client_binary }} -n {{ openshift_metrics_project | quote }} +    {{ openshift_client_binary }} -n {{ openshift_metrics_project | quote }}      --config={{ mktemp.stdout }}/admin.kubeconfig      get rc heapster -o jsonpath='{.spec.replicas}'    register: heapster_replica_count diff --git a/roles/openshift_metrics/tasks/install_metrics.yaml b/roles/openshift_metrics/tasks/install_metrics.yaml index 49d1d8cf1..106909941 100644 --- a/roles/openshift_metrics/tasks/install_metrics.yaml +++ b/roles/openshift_metrics/tasks/install_metrics.yaml @@ -70,7 +70,7 @@  - include_tasks: update_master_config.yaml  - command: > -    {{openshift.common.client_binary}} +    {{openshift_client_binary}}      --config={{mktemp.stdout}}/admin.kubeconfig      get rc      -l metrics-infra diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml index 9dfe360bb..b67077bca 100644 --- a/roles/openshift_metrics/tasks/main.yaml +++ b/roles/openshift_metrics/tasks/main.yaml @@ -9,10 +9,10 @@        - "'not installed' not in 
passlib_result.stdout"      msg: "python-passlib rpm must be installed on control host" -- name: Set default image variables based on deployment_type +- name: Set default image variables based on openshift_deployment_type    include_vars: "{{ item }}"    with_first_found: -    - "{{ openshift_deployment_type | default(deployment_type) }}.yml" +    - "{{ openshift_deployment_type }}.yml"      - "default_images.yml"  - name: Set metrics image facts diff --git a/roles/openshift_metrics/tasks/oc_apply.yaml b/roles/openshift_metrics/tasks/oc_apply.yaml index 1e1af40e8..8ccfb7192 100644 --- a/roles/openshift_metrics/tasks/oc_apply.yaml +++ b/roles/openshift_metrics/tasks/oc_apply.yaml @@ -1,7 +1,7 @@  ---  - name: Checking generation of {{file_content.kind}} {{file_content.metadata.name}}    command: > -    {{ openshift.common.client_binary }} +    {{ openshift_client_binary }}      --config={{ kubeconfig }}      get {{file_content.kind}} {{file_content.metadata.name}}      -o jsonpath='{.metadata.resourceVersion}' @@ -12,7 +12,7 @@  - name: Applying {{file_name}}    command: > -    {{ openshift.common.client_binary }} --config={{ kubeconfig }} +    {{ openshift_client_binary }} --config={{ kubeconfig }}      apply -f {{ file_name }}      -n {{namespace}}    register: generation_apply @@ -21,7 +21,7 @@  - name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}}    command: > -    {{ openshift.common.client_binary }} --config={{ kubeconfig }} +    {{ openshift_client_binary }} --config={{ kubeconfig }}      get {{file_content.kind}} {{file_content.metadata.name}}      -o jsonpath='{.metadata.resourceVersion}'      -n {{namespace}} diff --git a/roles/openshift_metrics/tasks/pre_install.yaml b/roles/openshift_metrics/tasks/pre_install.yaml index d6756f9b9..976763236 100644 --- a/roles/openshift_metrics/tasks/pre_install.yaml +++ b/roles/openshift_metrics/tasks/pre_install.yaml @@ -14,7 +14,7 @@  - name: list existing secrets    command: > -    
{{ openshift.common.client_binary }} -n {{ openshift_metrics_project }} +    {{ openshift_client_binary }} -n {{ openshift_metrics_project }}      --config={{ mktemp.stdout }}/admin.kubeconfig      get secrets -o name    register: metrics_secrets diff --git a/roles/openshift_metrics/tasks/setup_certificate.yaml b/roles/openshift_metrics/tasks/setup_certificate.yaml index 2d880f4d6..223bd975e 100644 --- a/roles/openshift_metrics/tasks/setup_certificate.yaml +++ b/roles/openshift_metrics/tasks/setup_certificate.yaml @@ -1,7 +1,7 @@  ---  - name: generate {{ component }} keys    command: > -    {{ openshift.common.client_binary }} adm ca create-server-cert +    {{ openshift_client_binary }} adm ca create-server-cert      --config={{ mktemp.stdout }}/admin.kubeconfig      --key='{{ mktemp.stdout }}/{{ component }}.key'      --cert='{{ mktemp.stdout }}/{{ component }}.crt' @@ -23,7 +23,7 @@  - name: generate random password for the {{ component }} keystore    copy: -    content: "{{ 15 | oo_random_word }}" +    content: "{{ 15 | lib_utils_oo_random_word }}"      dest: '{{ mktemp.stdout }}/{{ component }}-keystore.pwd'  - slurp: src={{ mktemp.stdout | quote }}/{{ component|quote }}-keystore.pwd @@ -39,5 +39,5 @@  - name: generate random password for the {{ component }} truststore    copy: -    content: "{{ 15 | oo_random_word }}" +    content: "{{ 15 | lib_utils_oo_random_word }}"      dest: '{{ mktemp.stdout | quote }}/{{ component|quote }}-truststore.pwd' diff --git a/roles/openshift_metrics/tasks/start_metrics.yaml b/roles/openshift_metrics/tasks/start_metrics.yaml index 2037e8dc3..899251727 100644 --- a/roles/openshift_metrics/tasks/start_metrics.yaml +++ b/roles/openshift_metrics/tasks/start_metrics.yaml @@ -1,6 +1,6 @@  ---  - command: > -    {{openshift.common.client_binary}} +    {{openshift_client_binary}}      --config={{mktemp.stdout}}/admin.kubeconfig      get rc      -l metrics-infra=hawkular-cassandra @@ -23,7 +23,7 @@    changed_when: metrics_cassandra_rc 
| length > 0  - command: > -    {{openshift.common.client_binary}} +    {{openshift_client_binary}}      --config={{mktemp.stdout}}/admin.kubeconfig      get rc      -l metrics-infra=hawkular-metrics @@ -45,7 +45,7 @@    changed_when: metrics_metrics_rc | length > 0  - command: > -    {{openshift.common.client_binary}} +    {{openshift_client_binary}}      --config={{mktemp.stdout}}/admin.kubeconfig      get rc      -l metrics-infra=heapster diff --git a/roles/openshift_metrics/tasks/stop_metrics.yaml b/roles/openshift_metrics/tasks/stop_metrics.yaml index 9a2ce9267..4b1d7119d 100644 --- a/roles/openshift_metrics/tasks/stop_metrics.yaml +++ b/roles/openshift_metrics/tasks/stop_metrics.yaml @@ -1,6 +1,6 @@  ---  - command: > -    {{openshift.common.client_binary}} +    {{openshift_client_binary}}      --config={{mktemp.stdout}}/admin.kubeconfig      get rc      -l metrics-infra=heapster @@ -22,7 +22,7 @@      loop_var: object  - command: > -    {{openshift.common.client_binary}} +    {{openshift_client_binary}}      --config={{mktemp.stdout}}/admin.kubeconfig      get rc      -l metrics-infra=hawkular-metrics @@ -44,7 +44,7 @@    changed_when: metrics_hawkular_rc | length > 0  - command: > -    {{openshift.common.client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig +    {{openshift_client_binary}} --config={{mktemp.stdout}}/admin.kubeconfig      get rc      -o name      -l metrics-infra=hawkular-cassandra diff --git a/roles/openshift_metrics/tasks/uninstall_hosa.yaml b/roles/openshift_metrics/tasks/uninstall_hosa.yaml index 42ed02460..ae3306496 100644 --- a/roles/openshift_metrics/tasks/uninstall_hosa.yaml +++ b/roles/openshift_metrics/tasks/uninstall_hosa.yaml @@ -1,7 +1,7 @@  ---  - name: remove Hawkular Agent (HOSA) components    command: > -    {{ openshift.common.client_binary }} -n {{ openshift_metrics_hawkular_agent_namespace }} --config={{ mktemp.stdout }}/admin.kubeconfig +    {{ openshift_client_binary }} -n {{ 
openshift_metrics_hawkular_agent_namespace }} --config={{ mktemp.stdout }}/admin.kubeconfig      delete --ignore-not-found --selector=metrics-infra=agent      all,sa,secrets,templates,routes,pvc,rolebindings,clusterrolebindings    register: delete_metrics @@ -9,7 +9,7 @@  - name: remove rolebindings    command: > -    {{ openshift.common.client_binary }} -n {{ openshift_metrics_hawkular_agent_namespace }} --config={{ mktemp.stdout }}/admin.kubeconfig +    {{ openshift_client_binary }} -n {{ openshift_metrics_hawkular_agent_namespace }} --config={{ mktemp.stdout }}/admin.kubeconfig      delete --ignore-not-found      clusterrolebinding/hawkular-openshift-agent-rb    changed_when: delete_metrics.stdout != 'No resources found' diff --git a/roles/openshift_metrics/tasks/uninstall_metrics.yaml b/roles/openshift_metrics/tasks/uninstall_metrics.yaml index 1265c7bfd..0ab0eec4b 100644 --- a/roles/openshift_metrics/tasks/uninstall_metrics.yaml +++ b/roles/openshift_metrics/tasks/uninstall_metrics.yaml @@ -4,7 +4,7 @@  - name: remove metrics components    command: > -    {{ openshift.common.client_binary }} -n {{ openshift_metrics_project }} --config={{ mktemp.stdout }}/admin.kubeconfig +    {{ openshift_client_binary }} -n {{ openshift_metrics_project }} --config={{ mktemp.stdout }}/admin.kubeconfig      delete --ignore-not-found --selector=metrics-infra      all,sa,secrets,templates,routes,pvc,rolebindings,clusterrolebindings,clusterrole    register: delete_metrics @@ -12,7 +12,7 @@  - name: remove rolebindings    command: > -    {{ openshift.common.client_binary }} -n {{ openshift_metrics_project }} --config={{ mktemp.stdout }}/admin.kubeconfig +    {{ openshift_client_binary }} -n {{ openshift_metrics_project }} --config={{ mktemp.stdout }}/admin.kubeconfig      delete --ignore-not-found      rolebinding/hawkular-view      clusterrolebinding/heapster-cluster-reader diff --git a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 
b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 index e976bc222..7c75b2f97 100644 --- a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 +++ b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 @@ -64,7 +64,7 @@ spec:          - name: MASTER_URL            value: "{{ openshift_metrics_master_url }}"          - name: JGROUPS_PASSWORD -          value: "{{ 17 | oo_random_word }}" +          value: "{{ 17 | lib_utils_oo_random_word }}"          - name: TRUSTSTORE_AUTHORITIES            value: "/hawkular-metrics-certs/tls.truststore.crt"          - name: ENABLE_PROMETHEUS_ENDPOINT diff --git a/roles/openshift_named_certificates/meta/main.yml b/roles/openshift_named_certificates/meta/main.yml index 2c6e12494..e7d81df53 100644 --- a/roles/openshift_named_certificates/meta/main.yml +++ b/roles/openshift_named_certificates/meta/main.yml @@ -14,3 +14,4 @@ galaxy_info:    - system  dependencies:  - role: openshift_facts +- role: lib_utils diff --git a/roles/openshift_named_certificates/tasks/main.yml b/roles/openshift_named_certificates/tasks/main.yml index 1bcf9ef67..ad5472445 100644 --- a/roles/openshift_named_certificates/tasks/main.yml +++ b/roles/openshift_named_certificates/tasks/main.yml @@ -1,6 +1,6 @@  ---  - set_fact: -    parsed_named_certificates: "{{ named_certificates | oo_parse_named_certificates(named_certs_dir, internal_hostnames) }}" +    parsed_named_certificates: "{{ named_certificates | lib_utils_oo_parse_named_certificates(named_certs_dir, internal_hostnames) }}"    when: named_certificates | length > 0    delegate_to: localhost    become: no @@ -43,4 +43,4 @@      src: "{{ item }}"      dest: "{{ named_certs_dir }}/{{ item | basename }}"      mode: 0600 -  with_items: "{{ named_certificates | oo_collect('cafile') }}" +  with_items: "{{ named_certificates | lib_utils_oo_collect('cafile') }}" diff --git a/roles/openshift_nfs/meta/main.yml b/roles/openshift_nfs/meta/main.yml index d7b5910f2..17c0cf33f 100644 --- 
a/roles/openshift_nfs/meta/main.yml +++ b/roles/openshift_nfs/meta/main.yml @@ -13,4 +13,4 @@ galaxy_info:    - cloud  dependencies:  - role: lib_utils -- role: lib_os_firewall +- role: lib_utils diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml index fff927944..a90aad532 100644 --- a/roles/openshift_node/defaults/main.yml +++ b/roles/openshift_node/defaults/main.yml @@ -1,6 +1,64 @@  ---  openshift_node_debug_level: "{{ debug_level | default(2) }}" - +openshift_node_iptables_sync_period: '30s' +osn_storage_plugin_deps: +- ceph +- glusterfs +- iscsi +openshift_node_local_quota_per_fsgroup: "" +openshift_node_proxy_mode: iptables +openshift_set_node_ip: False +openshift_config_base: '/etc/origin' + +openshift_oreg_url_default_dict: +  origin: "openshift/origin-${component}:${version}" +  openshift-enterprise: "openshift3/ose-${component}:${version}" +openshift_oreg_url_default: "{{ openshift_oreg_url_default_dict[openshift_deployment_type] }}" +oreg_url_node: "{{ oreg_url | default(openshift_oreg_url_default) }}" + +osn_ovs_image_default_dict: +  origin: "openshift/openvswitch" +  openshift-enterprise: "openshift3/openvswitch" +osn_ovs_image_default: "{{ osn_ovs_image_default_dict[openshift_deployment_type] }}" +osn_ovs_image: "{{ osn_ovs_image_default }}" + +openshift_dns_ip: "{{ ansible_default_ipv4['address'] }}" + +openshift_node_env_vars: {} + +# Create list of 'k=v' pairs. 
+l_node_kubelet_node_labels: "{{ openshift_node_labels | default({}) | lib_utils_oo_dict_to_keqv_list }}" + +openshift_node_kubelet_args_dict: +  aws: +    cloud-provider: +    - aws +    cloud-config: +    - "{{ openshift_config_base ~ '/aws.conf' }}" +    node-labels: "{{ l_node_kubelet_node_labels }}" +  openstack: +    cloud-provider: +    - openstack +    cloud-config: +    - "{{ openshift_config_base ~ '/openstack.conf' }}" +    node-labels: "{{ l_node_kubelet_node_labels }}" +  gce: +    cloud-provider: +    - gce +    cloud-config: +    - "{{ openshift_config_base ~ '/gce.conf' }}" +    node-labels: "{{ l_node_kubelet_node_labels }}" +  undefined: +    node-labels: "{{ l_node_kubelet_node_labels }}" + +l_node_kubelet_args_default: "{{ openshift_node_kubelet_args_dict[openshift_cloudprovider_kind | default('undefined')] }}" + +l_openshift_node_kubelet_args: "{{ openshift_node_kubelet_args | default({}) }}" +# Combine the default kubelet_args dictionary (based on cloud provider, if provided) +# with user-supplied openshift_node_kubelet_args. +# openshift_node_kubelet_args will override the defaults, if keys and/or subkeys +# are present in both. 
+l2_openshift_node_kubelet_args: "{{ l_node_kubelet_args_default | combine(l_openshift_node_kubelet_args, recursive=True) }}"  openshift_node_dnsmasq_install_network_manager_hook: true  # lo must always be present in this list or dnsmasq will conflict with @@ -14,10 +72,15 @@ r_openshift_node_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }  l_is_node_system_container: "{{ (openshift_use_node_system_container | default(openshift_use_system_containers | default(false)) | bool) }}"  openshift_deployment_type: "{{ openshift_deployment_type | default('origin') }}" + +openshift_node_image_dict: +  origin: 'openshift/node' +  openshift-enterprise: 'openshift3/node' +osn_image: "{{ openshift_node_image_dict[openshift_deployment_type] }}" +  openshift_service_type_dict:    origin: origin    openshift-enterprise: atomic-openshift -  openshift_service_type: "{{ openshift_service_type_dict[openshift_deployment_type] }}"  system_images_registry_dict: diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml index 62e0e1341..779916335 100644 --- a/roles/openshift_node/handlers/main.yml +++ b/roles/openshift_node/handlers/main.yml @@ -34,7 +34,7 @@    pause: seconds=15    when:    - (not skip_node_svc_handlers | default(False) | bool) -  - openshift.common.is_containerized | bool +  - openshift_is_containerized | bool  - name: restart node    systemd: diff --git a/roles/openshift_node/meta/main.yml b/roles/openshift_node/meta/main.yml index 70057c7f3..86a2ca16f 100644 --- a/roles/openshift_node/meta/main.yml +++ b/roles/openshift_node/meta/main.yml @@ -12,12 +12,7 @@ galaxy_info:    categories:    - cloud  dependencies: -- role: openshift_node_facts -  when: not (openshift_node_upgrade_in_progress | default(False))  - role: lib_openshift -- role: lib_os_firewall -  when: not (openshift_node_upgrade_in_progress | default(False))  - role: openshift_cloud_provider    when: not (openshift_node_upgrade_in_progress | default(False))  - 
role: lib_utils -  when: openshift_node_upgrade_in_progress | default(False) diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml index 8a55cd428..1103fe4c9 100644 --- a/roles/openshift_node/tasks/config.yml +++ b/roles/openshift_node/tasks/config.yml @@ -4,7 +4,7 @@  - name: Pull container images    include_tasks: container_images.yml -  when: openshift.common.is_containerized | bool +  when: openshift_is_containerized | bool  - name: Start and enable openvswitch service    systemd: @@ -13,7 +13,7 @@      state: started      daemon_reload: yes    when: -    - openshift.common.is_containerized | bool +    - openshift_is_containerized | bool      - openshift_node_use_openshift_sdn | default(true) | bool    register: ovs_start_result    until: not (ovs_start_result is failed) @@ -24,9 +24,9 @@      ovs_service_status_changed: "{{ ovs_start_result is changed }}"  - file: -    dest: "{{ (openshift_node_kubelet_args|default({'config':None})).config}}" +    dest: "{{ l2_openshift_node_kubelet_args['config'] }}"      state: directory -  when: openshift_node_kubelet_args is defined and 'config' in openshift_node_kubelet_args +  when: ('config' in l2_openshift_node_kubelet_args) | bool  # TODO: add the validate parameter when there is a validation command to run  - name: Create the Node config @@ -46,7 +46,7 @@      regexp: "^{{ item.key }}="      line: "{{ item.key }}={{ item.value }}"      create: true -  with_dict: "{{ openshift.node.env_vars | default({}) }}" +  with_dict: "{{ openshift_node_env_vars }}"    notify:      - restart node @@ -58,7 +58,7 @@  # restarted after the node restarts docker and it will take up to 60 seconds for  # systemd to start the master again  - when: -    - openshift.common.is_containerized | bool +    - openshift_is_containerized | bool      - not openshift_node_bootstrap    block:      - name: Wait for master API to become available before proceeding diff --git 
a/roles/openshift_node/tasks/container_images.yml b/roles/openshift_node/tasks/container_images.yml index 0b8c806ae..bb788e2f1 100644 --- a/roles/openshift_node/tasks/container_images.yml +++ b/roles/openshift_node/tasks/container_images.yml @@ -12,7 +12,7 @@  - name: Pre-pull openvswitch image    command: > -    docker pull {{ openshift.node.ovs_image }}:{{ openshift_image_tag }} +    docker pull {{ osn_ovs_image }}:{{ openshift_image_tag }}    register: pull_result    changed_when: "'Downloaded newer image' in pull_result.stdout"    when: diff --git a/roles/openshift_node/tasks/dnsmasq_install.yml b/roles/openshift_node/tasks/dnsmasq_install.yml index 0c8857b11..5e06ba032 100644 --- a/roles/openshift_node/tasks/dnsmasq_install.yml +++ b/roles/openshift_node/tasks/dnsmasq_install.yml @@ -12,7 +12,7 @@  - name: Install dnsmasq    package: name=dnsmasq state=installed -  when: not openshift.common.is_atomic | bool +  when: not openshift_is_atomic | bool    register: result    until: result is succeeded diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml index b1fcf4068..55738d759 100644 --- a/roles/openshift_node/tasks/install.yml +++ b/roles/openshift_node/tasks/install.yml @@ -1,16 +1,16 @@  --- -- when: not openshift.common.is_containerized | bool +- when: not openshift_is_containerized | bool    block:    - name: Install Node package      package: -      name: "{{ openshift_service_type }}-node{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}" +      name: "{{ openshift_service_type }}-node{{ (openshift_pkg_version | default('')) | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}"        state: present      register: result      until: result is succeeded    - name: Install sdn-ovs package      package: -      name: "{{ openshift_service_type }}-sdn-ovs{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}" +      name: "{{ 
openshift_service_type }}-sdn-ovs{{ (openshift_pkg_version | default('')) | lib_utils_oo_image_tag_to_rpm_version(include_dash=True) }}"        state: present      when:      - openshift_node_use_openshift_sdn | bool @@ -25,11 +25,11 @@      until: result is succeeded  - when: -  - openshift.common.is_containerized | bool +  - openshift_is_containerized | bool    - not l_is_node_system_container | bool    block:    - name: Pre-pull node image when containerized      command: > -      docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }} +      docker pull {{ osn_image }}:{{ openshift_image_tag }}      register: pull_result      changed_when: "'Downloaded newer image' in pull_result.stdout" diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml index 8bd8f2536..eb362816a 100644 --- a/roles/openshift_node/tasks/main.yml +++ b/roles/openshift_node/tasks/main.yml @@ -3,7 +3,7 @@      msg: "SELinux is disabled, This deployment type requires that SELinux is enabled."    
when:      - (not ansible_selinux or ansible_selinux.status != 'enabled') -    - deployment_type == 'openshift-enterprise' +    - openshift_deployment_type == 'openshift-enterprise'      - not openshift_use_crio  - include_tasks: dnsmasq_install.yml @@ -85,15 +85,15 @@  - name: GlusterFS storage plugin configuration    include_tasks: storage_plugins/glusterfs.yml -  when: "'glusterfs' in openshift.node.storage_plugin_deps" +  when: "'glusterfs' in osn_storage_plugin_deps"  - name: Ceph storage plugin configuration    include_tasks: storage_plugins/ceph.yml -  when: "'ceph' in openshift.node.storage_plugin_deps" +  when: "'ceph' in osn_storage_plugin_deps"  - name: iSCSI storage plugin configuration    include_tasks: storage_plugins/iscsi.yml -  when: "'iscsi' in openshift.node.storage_plugin_deps" +  when: "'iscsi' in osn_storage_plugin_deps"  ##### END Storage ##### diff --git a/roles/openshift_node/tasks/node_system_container.yml b/roles/openshift_node/tasks/node_system_container.yml index 98978ec6f..06b879050 100644 --- a/roles/openshift_node/tasks/node_system_container.yml +++ b/roles/openshift_node/tasks/node_system_container.yml @@ -2,14 +2,14 @@  - name: Pre-pull node system container image    command: > -    atomic pull --storage=ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }} +    atomic pull --storage=ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ osn_image }}:{{ openshift_image_tag }}    register: pull_result    changed_when: "'Pulling layer' in pull_result.stdout"  - name: Install or Update node system container    oc_atomic_container:      name: "{{ openshift_service_type }}-node" -    image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}" +    image: "{{ 'docker:' if system_images_registry 
== 'docker' else system_images_registry + '/' }}{{ osn_image }}:{{ openshift_image_tag }}"      values:      - "DNS_DOMAIN={{ openshift.common.dns_domain }}"      - "DOCKER_SERVICE={{ openshift_docker_service_name }}.service" diff --git a/roles/openshift_node/tasks/openvswitch_system_container.yml b/roles/openshift_node/tasks/openvswitch_system_container.yml index b61bc84c1..30ef9ef44 100644 --- a/roles/openshift_node/tasks/openvswitch_system_container.yml +++ b/roles/openshift_node/tasks/openvswitch_system_container.yml @@ -9,14 +9,14 @@  - name: Pre-pull OpenVSwitch system container image    command: > -    atomic pull --storage=ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }} +    atomic pull --storage=ostree {{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ osn_ovs_image }}:{{ openshift_image_tag }}    register: pull_result    changed_when: "'Pulling layer' in pull_result.stdout"  - name: Install or Update OpenVSwitch system container    oc_atomic_container:      name: openvswitch -    image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }}" +    image: "{{ 'docker:' if system_images_registry == 'docker' else system_images_registry + '/' }}{{ osn_ovs_image }}:{{ openshift_image_tag }}"      state: latest      values:        - "DOCKER_SERVICE={{ l_service_name }}" diff --git a/roles/openshift_node/tasks/registry_auth.yml b/roles/openshift_node/tasks/registry_auth.yml index ab43ec049..92650e6b7 100644 --- a/roles/openshift_node/tasks/registry_auth.yml +++ b/roles/openshift_node/tasks/registry_auth.yml @@ -41,7 +41,7 @@    set_fact:      l_bind_docker_reg_auth: True    when: -    - openshift.common.is_containerized | bool +    - openshift_is_containerized | bool      - oreg_auth_user is defined      - >          
(node_oreg_auth_credentials_stat.stat.exists diff --git a/roles/openshift_node/tasks/storage_plugins/ceph.yml b/roles/openshift_node/tasks/storage_plugins/ceph.yml index 52d80357e..e30f58a9a 100644 --- a/roles/openshift_node/tasks/storage_plugins/ceph.yml +++ b/roles/openshift_node/tasks/storage_plugins/ceph.yml @@ -1,6 +1,6 @@  ---  - name: Install Ceph storage plugin dependencies    package: name=ceph-common state=present -  when: not openshift.common.is_atomic | bool +  when: not openshift_is_atomic | bool    register: result    until: result is succeeded diff --git a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml index e60f57ae7..c04a6922a 100644 --- a/roles/openshift_node/tasks/storage_plugins/glusterfs.yml +++ b/roles/openshift_node/tasks/storage_plugins/glusterfs.yml @@ -1,7 +1,7 @@  ---  - name: Install GlusterFS storage plugin dependencies    package: name=glusterfs-fuse state=present -  when: not openshift.common.is_atomic | bool +  when: not openshift_is_atomic | bool    register: result    until: result is succeeded diff --git a/roles/openshift_node/tasks/storage_plugins/iscsi.yml b/roles/openshift_node/tasks/storage_plugins/iscsi.yml index d3a3668d5..a8048c42f 100644 --- a/roles/openshift_node/tasks/storage_plugins/iscsi.yml +++ b/roles/openshift_node/tasks/storage_plugins/iscsi.yml @@ -1,6 +1,6 @@  ---  - name: Install iSCSI storage plugin dependencies    package: name=iscsi-initiator-utils state=present -  when: not openshift.common.is_atomic | bool +  when: not openshift_is_atomic | bool    register: result    until: result is succeeded diff --git a/roles/openshift_node/tasks/storage_plugins/nfs.yml b/roles/openshift_node/tasks/storage_plugins/nfs.yml index 1484aa076..c2922644f 100644 --- a/roles/openshift_node/tasks/storage_plugins/nfs.yml +++ b/roles/openshift_node/tasks/storage_plugins/nfs.yml @@ -1,7 +1,7 @@  ---  - name: Install NFS storage plugin dependencies    package: 
name=nfs-utils state=present -  when: not openshift.common.is_atomic | bool +  when: not openshift_is_atomic | bool    register: result    until: result is succeeded diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml index 262ee698b..e33a4999f 100644 --- a/roles/openshift_node/tasks/systemd_units.yml +++ b/roles/openshift_node/tasks/systemd_units.yml @@ -2,13 +2,13 @@  - name: Install Node service file    template:      dest: "/etc/systemd/system/{{ openshift_service_type }}-node.service" -    src: "{{ openshift.common.is_containerized | bool | ternary('openshift.docker.node.service', 'node.service.j2') }}" +    src: "{{ openshift_is_containerized | bool | ternary('openshift.docker.node.service', 'node.service.j2') }}"    when: not l_is_node_system_container | bool    notify:    - reload systemd units    - restart node -- when: openshift.common.is_containerized | bool +- when: openshift_is_containerized | bool    block:    - name: include node deps docker service file      include_tasks: config/install-node-deps-docker-service-file.yml diff --git a/roles/openshift_node/tasks/upgrade.yml b/roles/openshift_node/tasks/upgrade.yml index f0a013e45..02e417937 100644 --- a/roles/openshift_node/tasks/upgrade.yml +++ b/roles/openshift_node/tasks/upgrade.yml @@ -1,11 +1,10 @@  ---  # input variables:  # - l_docker_upgrade -# - openshift.common.is_atomic +# - openshift_is_atomic  # - node_config_hook  # - openshift_pkg_version -# - openshift.common.is_containerized -# - deployment_type +# - openshift_is_containerized  # - openshift_release  # tasks file for openshift_node_upgrade @@ -26,7 +25,7 @@    include_tasks: upgrade/rpm_upgrade_install.yml    vars:      openshift_version: "{{ openshift_pkg_version | default('') }}" -  when: not openshift.common.is_containerized | bool +  when: not openshift_is_containerized | bool  - include_tasks: "{{ node_config_hook }}" diff --git 
a/roles/openshift_node/tasks/upgrade/config_changes.yml b/roles/openshift_node/tasks/upgrade/config_changes.yml index 439700df6..50044eb3e 100644 --- a/roles/openshift_node/tasks/upgrade/config_changes.yml +++ b/roles/openshift_node/tasks/upgrade/config_changes.yml @@ -1,7 +1,7 @@  ---  - name: Update systemd units    include_tasks: ../systemd_units.yml -  when: openshift.common.is_containerized +  when: openshift_is_containerized  - name: Update oreg value    yedit: diff --git a/roles/openshift_node/tasks/upgrade/containerized_upgrade_pull.yml b/roles/openshift_node/tasks/upgrade/containerized_upgrade_pull.yml index 71f00dcd2..0a14e5174 100644 --- a/roles/openshift_node/tasks/upgrade/containerized_upgrade_pull.yml +++ b/roles/openshift_node/tasks/upgrade/containerized_upgrade_pull.yml @@ -1,13 +1,13 @@  ---  - name: Pre-pull node image    command: > -    docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }} +    docker pull {{ osn_image }}:{{ openshift_image_tag }}    register: pull_result    changed_when: "'Downloaded newer image' in pull_result.stdout"  - name: Pre-pull openvswitch image    command: > -    docker pull {{ openshift.node.ovs_image }}:{{ openshift_image_tag }} +    docker pull {{ osn_ovs_image }}:{{ openshift_image_tag }}    register: pull_result    changed_when: "'Downloaded newer image' in pull_result.stdout"    when: openshift_use_openshift_sdn | bool diff --git a/roles/openshift_node/tasks/upgrade/restart.yml b/roles/openshift_node/tasks/upgrade/restart.yml index 45b0be0a0..bd6f42182 100644 --- a/roles/openshift_node/tasks/upgrade/restart.yml +++ b/roles/openshift_node/tasks/upgrade/restart.yml @@ -1,7 +1,7 @@  ---  # input variables:  # - openshift_service_type -# - openshift.common.is_containerized +# - openshift_is_containerized  # - openshift.common.hostname  # - openshift.master.api_port diff --git a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml index 
cc9a8f2d9..91a358095 100644 --- a/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml +++ b/roles/openshift_node/tasks/upgrade/rpm_upgrade.yml @@ -3,7 +3,7 @@  # - openshift_service_type  # - component  # - openshift_pkg_version -# - openshift.common.is_atomic +# - openshift_is_atomic  # Pre-pull new node rpm, but don't install  - name: download new node packages diff --git a/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml b/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml index 32eeb76c6..c9094e05a 100644 --- a/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml +++ b/roles/openshift_node/tasks/upgrade/rpm_upgrade_install.yml @@ -3,7 +3,7 @@  # - openshift_service_type  # - component  # - openshift_pkg_version -# - openshift.common.is_atomic +# - openshift_is_atomic  # Install the pre-pulled RPM  # Note: dnsmasq is covered in it's own play.  openvswitch is included here diff --git a/roles/openshift_node/tasks/upgrade/stop_services.yml b/roles/openshift_node/tasks/upgrade/stop_services.yml index 2fff556e5..6d92516c3 100644 --- a/roles/openshift_node/tasks/upgrade/stop_services.yml +++ b/roles/openshift_node/tasks/upgrade/stop_services.yml @@ -19,7 +19,7 @@    - "{{ openshift_service_type }}-master-controllers"    - "{{ openshift_service_type }}-node"    failed_when: false -  when: openshift.common.is_containerized | bool +  when: openshift_is_containerized | bool  - service:      name: docker @@ -40,4 +40,4 @@    - "{{ openshift_service_type }}-node"    - openvswitch    failed_when: false -  when: not openshift.common.is_containerized | bool +  when: not openshift_is_containerized | bool diff --git a/roles/openshift_node/tasks/upgrade_pre.yml b/roles/openshift_node/tasks/upgrade_pre.yml index 7f591996c..3ae7dc6b6 100644 --- a/roles/openshift_node/tasks/upgrade_pre.yml +++ b/roles/openshift_node/tasks/upgrade_pre.yml @@ -11,7 +11,7 @@    command: "{{ ansible_pkg_mgr }} makecache"    register: result    until: result is succeeded -  when: 
not openshift.common.is_containerized | bool +  when: not openshift_is_containerized | bool  - name: Check Docker image count    shell: "docker images -aq | wc -l" @@ -26,7 +26,7 @@    - l_docker_upgrade | bool  - include_tasks: upgrade/containerized_upgrade_pull.yml -  when: openshift.common.is_containerized | bool +  when: openshift_is_containerized | bool  # Prepull the rpms for docker upgrade, but don't install  - name: download docker upgrade rpm @@ -40,7 +40,7 @@  - include_tasks: upgrade/rpm_upgrade.yml    vars:      openshift_version: "{{ openshift_pkg_version | default('') }}" -  when: not openshift.common.is_containerized | bool +  when: not openshift_is_containerized | bool  # https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory  - name: Check for swap usage diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2 index 261cac6f1..f091263f5 100644 --- a/roles/openshift_node/templates/node.yaml.v1.j2 +++ b/roles/openshift_node/templates/node.yaml.v1.j2 @@ -5,17 +5,15 @@ dnsBindAddress: 127.0.0.1:53  dnsRecursiveResolvConf: /etc/origin/node/resolv.conf  {% endif %}  dnsDomain: {{ openshift.common.dns_domain }} -{% if 'dns_ip' in openshift.node %} -dnsIP: {{ openshift.node.dns_ip }} -{% endif %} +dnsIP: {{ openshift_dns_ip }}  dockerConfig:    execHandlerName: "" -iptablesSyncPeriod: "{{ openshift.node.iptables_sync_period }}" +iptablesSyncPeriod: "{{ openshift_node_iptables_sync_period }}"  imageConfig: -  format: {{ openshift.node.registry_url }} +  format: {{ oreg_url_node }}    latest: {{ openshift_node_image_config_latest }}  kind: NodeConfig -kubeletArguments: {{ openshift.node.kubelet_args | default(None) | to_padded_yaml(level=1) }} +kubeletArguments: {{  l2_openshift_node_kubelet_args  | default(None) | lib_utils_to_padded_yaml(level=1) }}  {% if openshift_use_crio %}    container-runtime:    - remote @@ -45,7 +43,7 @@ networkConfig:  {% if 
openshift_node_use_openshift_sdn | bool or openshift_node_use_nuage | bool or openshift_node_use_contiv | bool or openshift_node_use_kuryr | bool or openshift_node_sdn_network_plugin_name == 'cni' %}     networkPluginName: {{ openshift_node_sdn_network_plugin_name }}  {% endif %} -{% if openshift.node.set_node_ip | bool %} +{% if openshift_set_node_ip | bool %}  nodeIP: {{ openshift.common.ip }}  {% endif %}  nodeName: {{ openshift.node.nodename }} @@ -68,8 +66,8 @@ volumeDirectory: {{ openshift_node_data_dir }}/openshift.local.volumes  {% if not (openshift_node_use_kuryr | default(False)) | bool %}  proxyArguments:    proxy-mode: -     - {{ openshift.node.proxy_mode }} +     - {{ openshift_node_proxy_mode }}  {% endif %}  volumeConfig:    localQuota: -    perFSGroup: {{ openshift.node.local_quota_per_fsgroup }} +    perFSGroup: {{ openshift_node_local_quota_per_fsgroup }} diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service index b174c7023..ae7b147a6 100644 --- a/roles/openshift_node/templates/openshift.docker.node.service +++ b/roles/openshift_node/templates/openshift.docker.node.service @@ -38,7 +38,7 @@ ExecStart=/usr/bin/docker run --name {{ openshift_service_type }}-node \    {% if openshift_use_nuage | default(false) -%} $NUAGE_ADDTL_BIND_MOUNTS {% endif -%} \    -v /dev:/dev $DOCKER_ADDTL_BIND_MOUNTS -v /etc/pki:/etc/pki:ro \    {% if l_bind_docker_reg_auth %} -v {{ oreg_auth_credentials_path }}:/root/.docker:ro{% endif %}\ -  {{ openshift.node.node_image }}:${IMAGE_VERSION} +  {{ osn_image }}:${IMAGE_VERSION}  ExecStartPost=/usr/bin/sleep 10  ExecStop=/usr/bin/docker stop {{ openshift_service_type }}-node  ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf diff --git a/roles/openshift_node/templates/openvswitch.docker.service b/roles/openshift_node/templates/openvswitch.docker.service index 37f091c76..1fc9b6e72 100644 --- 
a/roles/openshift_node/templates/openvswitch.docker.service +++ b/roles/openshift_node/templates/openvswitch.docker.service @@ -6,7 +6,7 @@ PartOf={{ openshift_docker_service_name }}.service  [Service]  EnvironmentFile=/etc/sysconfig/openvswitch  ExecStartPre=-/usr/bin/docker rm -f openvswitch -ExecStart=/usr/bin/docker run --name openvswitch --rm --privileged --net=host --pid=host -v /lib/modules:/lib/modules -v /run:/run -v /sys:/sys:ro -v /etc/origin/openvswitch:/etc/openvswitch {{ openshift.node.ovs_image }}:${IMAGE_VERSION} +ExecStart=/usr/bin/docker run --name openvswitch --rm --privileged --net=host --pid=host -v /lib/modules:/lib/modules -v /run:/run -v /sys:/sys:ro -v /etc/origin/openvswitch:/etc/openvswitch {{ osn_ovs_image }}:${IMAGE_VERSION}  ExecStartPost=/usr/bin/sleep 5  ExecStop=/usr/bin/docker stop openvswitch  SyslogIdentifier=openvswitch diff --git a/roles/openshift_node_certificates/meta/main.yml b/roles/openshift_node_certificates/meta/main.yml index 0440bf11a..4362c644a 100644 --- a/roles/openshift_node_certificates/meta/main.yml +++ b/roles/openshift_node_certificates/meta/main.yml @@ -12,4 +12,5 @@ galaxy_info:    categories:    - cloud    - system -dependencies: [] +dependencies: +- role: lib_utils diff --git a/roles/openshift_node_certificates/tasks/main.yml b/roles/openshift_node_certificates/tasks/main.yml index 97f1fbbdd..e95e38fdf 100644 --- a/roles/openshift_node_certificates/tasks/main.yml +++ b/roles/openshift_node_certificates/tasks/main.yml @@ -31,7 +31,7 @@      node_certs_missing: "{{ true if openshift_certificates_redeploy | default(false) | bool                              else (False in (g_node_cert_stat_result.results                                              | default({}) -                                            | oo_collect(attribute='stat.exists') +                                            | lib_utils_oo_collect(attribute='stat.exists')                                              | list)) }}"  - name: Create 
openshift_generated_configs_dir if it does not exist @@ -51,11 +51,11 @@  - name: Generate the node client config    command: > -    {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config -    {% for named_ca_certificate in hostvars[openshift_ca_host].openshift.master.named_certificates | default([]) | oo_collect('cafile') %} +    {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm create-api-client-config +    {% for named_ca_certificate in hostvars[openshift_ca_host].openshift.master.named_certificates | default([]) | lib_utils_oo_collect('cafile') %}      --certificate-authority {{ named_ca_certificate }}      {% endfor %} -    {% for legacy_ca_certificate in g_master_legacy_ca_result.files | default([]) | oo_collect('path') %} +    {% for legacy_ca_certificate in g_master_legacy_ca_result.files | default([]) | lib_utils_oo_collect('path') %}      --certificate-authority {{ legacy_ca_certificate }}      {% endfor %}      --certificate-authority={{ openshift_ca_cert }} @@ -70,14 +70,14 @@    args:      creates: "{{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}"    with_items: "{{ hostvars -                  | oo_select_keys(groups['oo_nodes_to_config']) -                  | oo_collect(attribute='inventory_hostname', filters={'node_certs_missing':True}) }}" +                  | lib_utils_oo_select_keys(groups['oo_nodes_to_config']) +                  | lib_utils_oo_collect(attribute='inventory_hostname', filters={'node_certs_missing':True}) }}"    delegate_to: "{{ openshift_ca_host }}"    run_once: true  - name: Generate the node server certificate    command: > -    {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm ca create-server-cert +    {{ hostvars[openshift_ca_host]['first_master_client_binary'] }} adm ca create-server-cert      --cert={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}/server.crt      
--key={{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}/server.key      --expire-days={{ openshift_node_cert_expire_days }} @@ -89,8 +89,8 @@    args:      creates: "{{ openshift_generated_configs_dir }}/node-{{ hostvars[item].openshift.common.hostname }}/server.crt"    with_items: "{{ hostvars -                  | oo_select_keys(groups['oo_nodes_to_config']) -                  | oo_collect(attribute='inventory_hostname', filters={'node_certs_missing':True}) }}" +                  | lib_utils_oo_select_keys(groups['oo_nodes_to_config']) +                  | lib_utils_oo_collect(attribute='inventory_hostname', filters={'node_certs_missing':True}) }}"    delegate_to: "{{ openshift_ca_host }}"    run_once: true diff --git a/roles/openshift_node_facts/filter_plugins/openshift_node_facts_filters.py b/roles/openshift_node_facts/filter_plugins/openshift_node_facts_filters.py deleted file mode 100644 index 69069f2dc..000000000 --- a/roles/openshift_node_facts/filter_plugins/openshift_node_facts_filters.py +++ /dev/null @@ -1,32 +0,0 @@ -#!/usr/bin/python -# -*- coding: utf-8 -*- -''' -Custom filters for use in openshift-node -''' -from ansible import errors - - -class FilterModule(object): -    ''' Custom ansible filters for use by openshift_node_facts role''' - -    @staticmethod -    def node_get_dns_ip(openshift_dns_ip, hostvars): -        ''' Navigates the complicated logic of when to set dnsIP - -            In all situations if they've set openshift_dns_ip use that -            For 1.0/3.0 installs we use the openshift_master_cluster_vip, openshift_node_first_master_ip, else None -            For 1.1/3.1 installs we use openshift_master_cluster_vip, else None (product will use kube svc ip) -            For 1.2/3.2+ installs we set to the node's default interface ip -        ''' - -        if not issubclass(type(hostvars), dict): -            raise errors.AnsibleFilterError("|failed expects hostvars is a dict") - -        # We 
always use what they've specified if they've specified a value -        if openshift_dns_ip is not None: -            return openshift_dns_ip -        return hostvars['ansible_default_ipv4']['address'] - -    def filters(self): -        ''' returns a mapping of filters to methods ''' -        return {'node_get_dns_ip': self.node_get_dns_ip} diff --git a/roles/openshift_node_facts/tasks/main.yml b/roles/openshift_node_facts/tasks/main.yml deleted file mode 100644 index c234a3000..000000000 --- a/roles/openshift_node_facts/tasks/main.yml +++ /dev/null @@ -1,25 +0,0 @@ ---- -- name: Set node facts -  openshift_facts: -    role: "{{ item.role }}" -    local_facts: "{{ item.local_facts }}" -  with_items: -  # Reset node labels to an empty dictionary. -  - role: node -    local_facts: -      labels: {} -  - role: node -    local_facts: -      annotations: "{{ openshift_node_annotations | default(none) }}" -      iptables_sync_period: "{{ openshift_node_iptables_sync_period | default(None) }}" -      kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}" -      labels: "{{ openshift_node_labels | default(None) }}" -      registry_url: "{{ oreg_url_node | default(oreg_url) | default(None) }}" -      storage_plugin_deps: "{{ osn_storage_plugin_deps | default(None) }}" -      set_node_ip: "{{ openshift_set_node_ip | default(None) }}" -      node_image: "{{ osn_image | default(None) }}" -      ovs_image: "{{ osn_ovs_image | default(None) }}" -      proxy_mode: "{{ openshift_node_proxy_mode | default('iptables') }}" -      local_quota_per_fsgroup: "{{ openshift_node_local_quota_per_fsgroup | default(None) }}" -      dns_ip: "{{ openshift_dns_ip | default(none) | node_get_dns_ip(hostvars[inventory_hostname])}}" -      env_vars: "{{ openshift_node_env_vars | default(None) }}" diff --git a/roles/openshift_node_group/defaults/main.yml b/roles/openshift_node_group/defaults/main.yml index 7c81409a5..cccdea66f 100644 --- a/roles/openshift_node_group/defaults/main.yml +++ 
b/roles/openshift_node_group/defaults/main.yml @@ -17,7 +17,13 @@ openshift_node_group_edits: []  openshift_node_group_namespace: openshift-node  openshift_node_group_labels: [] -openshift_imageconfig_format: "{{ oreg_url if oreg_url is defined else openshift.node.registry_url }}" +openshift_oreg_url_default_dict: +  origin: "openshift/origin-${component}:${version}" +  openshift-enterprise: openshift3/ose-${component}:${version} +openshift_oreg_url_default: "{{ openshift_oreg_url_default_dict[openshift_deployment_type] }}" +oreg_url_node: "{{ oreg_url | default(openshift_oreg_url_default) }}" + +openshift_imageconfig_format: "{{ oreg_url_node }}"  openshift_node_group_cloud_provider: "{{ openshift_cloudprovider_kind | default('aws') }}"  openshift_node_group_network_plugin_default: "{{ os_sdn_network_plugin_name | default('redhat/openshift-ovs-subnet') }}"  openshift_node_group_network_plugin: "{{ openshift_node_group_network_plugin_default }}" diff --git a/roles/openshift_persistent_volumes/meta/main.yml b/roles/openshift_persistent_volumes/meta/main.yml index 48b0699ab..aea7616bf 100644 --- a/roles/openshift_persistent_volumes/meta/main.yml +++ b/roles/openshift_persistent_volumes/meta/main.yml @@ -11,3 +11,4 @@ galaxy_info:      - 7  dependencies:  - role: openshift_facts +- role: lib_utils diff --git a/roles/openshift_persistent_volumes/tasks/pv.yml b/roles/openshift_persistent_volumes/tasks/pv.yml index 346605ff7..ef9ab7f5f 100644 --- a/roles/openshift_persistent_volumes/tasks/pv.yml +++ b/roles/openshift_persistent_volumes/tasks/pv.yml @@ -8,7 +8,7 @@  - name: Create PersistentVolumes    command: > -    {{ openshift.common.client_binary }} create +    {{ openshift_client_binary }} create      -f {{ mktemp.stdout }}/persistent-volumes.yml      --config={{ mktemp.stdout }}/admin.kubeconfig    register: pv_create_output diff --git a/roles/openshift_persistent_volumes/tasks/pvc.yml b/roles/openshift_persistent_volumes/tasks/pvc.yml index e44f9b18f..2c5519192 
100644 --- a/roles/openshift_persistent_volumes/tasks/pvc.yml +++ b/roles/openshift_persistent_volumes/tasks/pvc.yml @@ -8,7 +8,7 @@  - name: Create PersistentVolumeClaims    command: > -    {{ openshift.common.client_binary }} create +    {{ openshift_client_binary }} create      -f {{ mktemp.stdout }}/persistent-volume-claims.yml      --config={{ mktemp.stdout }}/admin.kubeconfig    register: pvc_create_output diff --git a/roles/openshift_persistent_volumes/templates/persistent-volume-claim.yml.j2 b/roles/openshift_persistent_volumes/templates/persistent-volume-claim.yml.j2 index d40417a9a..fac589a92 100644 --- a/roles/openshift_persistent_volumes/templates/persistent-volume-claim.yml.j2 +++ b/roles/openshift_persistent_volumes/templates/persistent-volume-claim.yml.j2 @@ -8,7 +8,7 @@ items:    metadata:      name: "{{ claim.name }}"    spec: -    accessModes: {{ claim.access_modes | to_padded_yaml(2, 2) }} +    accessModes: {{ claim.access_modes | lib_utils_to_padded_yaml(2, 2) }}      resources:        requests:          storage: "{{ claim.capacity }}" diff --git a/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 index 9ec14208b..354561432 100644 --- a/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 +++ b/roles/openshift_persistent_volumes/templates/persistent-volume.yml.j2 @@ -16,6 +16,6 @@ items:    spec:      capacity:        storage: "{{ volume.capacity }}" -    accessModes: {{ volume.access_modes | to_padded_yaml(2, 2) }} -    {{ (volume.storage.keys() | list)[0] }}: {{ volume.storage[(volume.storage.keys() | list)[0]] | to_padded_yaml(3, 2) }} +    accessModes: {{ volume.access_modes | lib_utils_to_padded_yaml(2, 2) }} +    {{ (volume.storage.keys() | list)[0] }}: {{ volume.storage[(volume.storage.keys() | list)[0]] | lib_utils_to_padded_yaml(3, 2) }}  {% endfor %} diff --git a/roles/openshift_project_request_template/tasks/main.yml 
b/roles/openshift_project_request_template/tasks/main.yml index c31ee5795..3403840fb 100644 --- a/roles/openshift_project_request_template/tasks/main.yml +++ b/roles/openshift_project_request_template/tasks/main.yml @@ -6,7 +6,7 @@  - name: Generate default project template    command: | -    {{ openshift.common.client_binary | quote }} \ +    {{ openshift_client_binary | quote }} \        --config {{ openshift.common.config_base | quote }}/master/admin.kubeconfig \        --output yaml \        adm create-bootstrap-project-template \ @@ -28,7 +28,7 @@  - name: Create or update project request template    command: | -    {{ openshift.common.client_binary }} \ +    {{ openshift_client_binary }} \        --config {{ openshift.common.config_base }}/master/admin.kubeconfig \        --namespace {{ openshift_project_request_template_namespace | quote }} \        apply --filename {{ mktemp.stdout }} diff --git a/roles/openshift_prometheus/meta/main.yaml b/roles/openshift_prometheus/meta/main.yaml index 33188bb7e..69c5e0ee2 100644 --- a/roles/openshift_prometheus/meta/main.yaml +++ b/roles/openshift_prometheus/meta/main.yaml @@ -15,5 +15,6 @@ galaxy_info:    categories:    - openshift  dependencies: -- { role: lib_openshift } -- { role: openshift_facts } +- role: lib_openshift +- role: openshift_facts +- role: lib_utils diff --git a/roles/openshift_prometheus/tasks/install_prometheus.yaml b/roles/openshift_prometheus/tasks/install_prometheus.yaml index abc5dd476..749df5152 100644 --- a/roles/openshift_prometheus/tasks/install_prometheus.yaml +++ b/roles/openshift_prometheus/tasks/install_prometheus.yaml @@ -5,7 +5,7 @@    oc_project:      state: present      name: "{{ openshift_prometheus_namespace }}" -    node_selector: "{{ openshift_prometheus_node_selector | oo_selector_to_string_list() }}" +    node_selector: "{{ openshift_prometheus_node_selector | lib_utils_oo_selector_to_string_list() }}"      description: Prometheus  # secrets @@ -16,7 +16,7 @@      namespace: "{{ 
openshift_prometheus_namespace }}"      contents:        - path: session_secret -        data: "{{ 43 | oo_random_word }}=" +        data: "{{ 43 | lib_utils_oo_random_word }}="    with_items:      - prometheus      - alerts @@ -39,7 +39,7 @@  # TODO remove this when annotations are supported by oc_serviceaccount  - name: annotate serviceaccount    command: > -    {{ openshift.common.client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }} +    {{ openshift_client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }}      serviceaccount prometheus      serviceaccounts.openshift.io/oauth-redirectreference.prom='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"prometheus"}}'      serviceaccounts.openshift.io/oauth-redirectreference.alerts='{"kind":"OAuthRedirectReference","apiVersion":"v1","reference":{"kind":"Route","name":"alerts"}}' @@ -97,7 +97,7 @@  # TODO remove this when annotations are supported by oc_service  - name: annotate prometheus service    command: > -    {{ openshift.common.client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }} +    {{ openshift_client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }}      service prometheus      prometheus.io/scrape='true'      prometheus.io/scheme=https @@ -105,7 +105,7 @@  - name: annotate alerts service    command: > -    {{ openshift.common.client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }} +    {{ openshift_client_binary }} annotate --overwrite -n {{ openshift_prometheus_namespace }}      service alerts 'service.alpha.openshift.io/serving-cert-secret-name=prometheus-alerts-tls'  # create prometheus and alerts routes diff --git a/roles/openshift_prometheus/tasks/main.yaml b/roles/openshift_prometheus/tasks/main.yaml index 38798e1f5..b859eb111 100644 --- a/roles/openshift_prometheus/tasks/main.yaml +++ b/roles/openshift_prometheus/tasks/main.yaml @@ -1,5 +1,5 @@  --- 
-- name: Set default image variables based on deployment_type +- name: Set default image variables based on openshift_deployment_type    include_vars: "{{ item }}"    with_first_found:      - "{{ openshift_deployment_type }}.yml" diff --git a/roles/openshift_provisioners/meta/main.yaml b/roles/openshift_provisioners/meta/main.yaml index cb9278eb7..5ef352bcd 100644 --- a/roles/openshift_provisioners/meta/main.yaml +++ b/roles/openshift_provisioners/meta/main.yaml @@ -14,3 +14,4 @@ galaxy_info:  dependencies:  - role: lib_openshift  - role: openshift_facts +- role: lib_utils diff --git a/roles/openshift_provisioners/tasks/install_efs.yaml b/roles/openshift_provisioners/tasks/install_efs.yaml index e543d753c..de763f6cf 100644 --- a/roles/openshift_provisioners/tasks/install_efs.yaml +++ b/roles/openshift_provisioners/tasks/install_efs.yaml @@ -1,7 +1,7 @@  ---  - name: Check efs current replica count    command: > -    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc provisioners-efs +    {{ openshift_client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc provisioners-efs      -o jsonpath='{.spec.replicas}' -n {{openshift_provisioners_project}}    register: efs_replica_count    when: not ansible_check_mode @@ -58,7 +58,7 @@  # anyuid in order to run as root & chgrp shares with allocated gids  - name: "Check efs anyuid permissions"    command: > -    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig +    {{ openshift_client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig      get scc/anyuid -o jsonpath='{.users}'    register: efs_anyuid    check_mode: no @@ -66,7 +66,7 @@  - name: "Set anyuid permissions for efs"    command: > -    {{ openshift.common.client_binary}} adm --config={{ mktemp.stdout }}/admin.kubeconfig policy +    {{ openshift_client_binary}} adm --config={{ mktemp.stdout }}/admin.kubeconfig policy      add-scc-to-user anyuid 
system:serviceaccount:{{openshift_provisioners_project}}:provisioners-efs    register: efs_output    failed_when: efs_output.rc == 1 and 'exists' not in efs_output.stderr diff --git a/roles/openshift_provisioners/tasks/oc_apply.yaml b/roles/openshift_provisioners/tasks/oc_apply.yaml index 49d03f203..a4ce53eae 100644 --- a/roles/openshift_provisioners/tasks/oc_apply.yaml +++ b/roles/openshift_provisioners/tasks/oc_apply.yaml @@ -1,7 +1,7 @@  ---  - name: Checking generation of {{file_content.kind}} {{file_content.metadata.name}}    command: > -    {{ openshift.common.client_binary }} --config={{ kubeconfig }} +    {{ openshift_client_binary }} --config={{ kubeconfig }}      get {{file_content.kind}} {{file_content.metadata.name}}      -o jsonpath='{.metadata.resourceVersion}'      -n {{namespace}} @@ -11,7 +11,7 @@  - name: Applying {{file_name}}    command: > -    {{ openshift.common.client_binary }} --config={{ kubeconfig }} +    {{ openshift_client_binary }} --config={{ kubeconfig }}      apply -f {{ file_name }}      -n {{ namespace }}    register: generation_apply @@ -20,7 +20,7 @@  - name: Determine change status of {{file_content.kind}} {{file_content.metadata.name}}    command: > -    {{ openshift.common.client_binary }} --config={{ kubeconfig }} +    {{ openshift_client_binary }} --config={{ kubeconfig }}      get {{file_content.kind}} {{file_content.metadata.name}}      -o jsonpath='{.metadata.resourceVersion}'      -n {{namespace}} @@ -32,7 +32,7 @@  - name: Removing previous {{file_name}}    command: > -    {{ openshift.common.client_binary }} --config={{ kubeconfig }} +    {{ openshift_client_binary }} --config={{ kubeconfig }}      delete -f {{ file_name }}      -n {{ namespace }}    register: generation_delete @@ -42,7 +42,7 @@  - name: Recreating {{file_name}}    command: > -    {{ openshift.common.client_binary }} --config={{ kubeconfig }} +    {{ openshift_client_binary }} --config={{ kubeconfig }}      apply -f {{ file_name }}      -n {{ namespace 
}}    register: generation_apply diff --git a/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml b/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml index 602dee773..ac12087ec 100644 --- a/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml +++ b/roles/openshift_provisioners/tasks/uninstall_provisioners.yaml @@ -5,7 +5,7 @@  # delete the deployment objects that we had created  - name: delete provisioner api objects    command: > -    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig +    {{ openshift_client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig      delete {{ item }} --selector provisioners-infra -n {{ openshift_provisioners_project }} --ignore-not-found=true    with_items:      - dc @@ -15,7 +15,7 @@  # delete our old secrets  - name: delete provisioner secrets    command: > -    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig +    {{ openshift_client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig      delete secret {{ item }} -n {{ openshift_provisioners_project }} --ignore-not-found=true    with_items:      - provisioners-efs @@ -26,7 +26,7 @@  # delete cluster role bindings  - name: delete cluster role bindings    command: > -    {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig +    {{ openshift_client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig      delete clusterrolebindings {{ item }} -n {{ openshift_provisioners_project }} --ignore-not-found=true    with_items:      - run-provisioners-efs diff --git a/roles/openshift_repos/tasks/main.yaml b/roles/openshift_repos/tasks/main.yaml index 2ada20767..911005bb6 100644 --- a/roles/openshift_repos/tasks/main.yaml +++ b/roles/openshift_repos/tasks/main.yaml @@ -40,9 +40,9 @@      - include_tasks: rhel_repos.yml        when:        - ansible_distribution == 'RedHat' -      - deployment_type == 'openshift-enterprise' +      - 
openshift_deployment_type == 'openshift-enterprise'        - rhsub_user is defined -      - rhsub_password is defined +      - rhsub_pass is defined      - include_tasks: centos_repos.yml        when: diff --git a/roles/openshift_repos/tasks/rhel_repos.yml b/roles/openshift_repos/tasks/rhel_repos.yml index c384cbe9a..8d16629cc 100644 --- a/roles/openshift_repos/tasks/rhel_repos.yml +++ b/roles/openshift_repos/tasks/rhel_repos.yml @@ -6,11 +6,10 @@    failed_when: repo_rhui.rc == 11  - name: Disable RHEL rhui repositories -  command: bash -c "yum-config-manager \ +  command: yum-config-manager \      --disable 'rhui-REGION-client-config-server-7' \      --disable 'rhui-REGION-rhel-server-rh-common' \ -    --disable 'rhui-REGION-rhel-server-releases' \ -    --disable 'rhui-REGION-client-config-server-7'" +    --disable 'rhui-REGION-rhel-server-releases'    when: repo_rhui.changed  - name: Ensure RHEL repositories are enabled diff --git a/roles/openshift_repos/templates/CentOS-OpenShift-Origin37.repo.j2 b/roles/openshift_repos/templates/CentOS-OpenShift-Origin37.repo.j2 new file mode 100644 index 000000000..db214af2c --- /dev/null +++ b/roles/openshift_repos/templates/CentOS-OpenShift-Origin37.repo.j2 @@ -0,0 +1,27 @@ +[centos-openshift-origin37] +name=CentOS OpenShift Origin +baseurl=http://mirror.centos.org/centos/7/paas/x86_64/openshift-origin37/ +enabled=1 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS + +[centos-openshift-origin37-testing] +name=CentOS OpenShift Origin Testing +baseurl=http://buildlogs.centos.org/centos/7/paas/x86_64/openshift-origin37/ +enabled={{ 1 if openshift_repos_enable_testing else 0 }} +gpgcheck=0 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS + +[centos-openshift-origin37-debuginfo] +name=CentOS OpenShift Origin DebugInfo +baseurl=http://debuginfo.centos.org/centos/7/paas/x86_64/ +enabled=0 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS + +[centos-openshift-origin37-source] 
+name=CentOS OpenShift Origin Source +baseurl=http://vault.centos.org/centos/7/paas/Source/openshift-origin37/ +enabled=0 +gpgcheck=1 +gpgkey=file:///etc/pki/rpm-gpg/RPM-GPG-KEY-CentOS-SIG-PaaS diff --git a/roles/openshift_sanitize_inventory/meta/main.yml b/roles/openshift_sanitize_inventory/meta/main.yml index f5b37186e..324ba06d8 100644 --- a/roles/openshift_sanitize_inventory/meta/main.yml +++ b/roles/openshift_sanitize_inventory/meta/main.yml @@ -12,4 +12,5 @@ galaxy_info:    categories:    - cloud    - system -dependencies: [] +dependencies: +- role: lib_utils diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml index 651d896cf..62d460272 100644 --- a/roles/openshift_sanitize_inventory/tasks/main.yml +++ b/roles/openshift_sanitize_inventory/tasks/main.yml @@ -3,37 +3,11 @@  # the user would also be aware of any deprecated variables they should note to adjust  - include_tasks: deprecations.yml -- name: Abort when conflicting deployment type variables are set -  when: -    - deployment_type is defined -    - openshift_deployment_type is defined -    - openshift_deployment_type != deployment_type -  fail: -    msg: |- -      openshift_deployment_type is set to "{{ openshift_deployment_type }}". -      deployment_type is set to "{{ deployment_type }}". -      To avoid unexpected results, this conflict is not allowed. -      deployment_type is deprecated in favor of openshift_deployment_type. -      Please specify only openshift_deployment_type, or make both the same. -  - name: Standardize on latest variable names    set_fact: -    # goal is to deprecate deployment_type in favor of openshift_deployment_type. -    # both will be accepted for now, but code should refer to the new name. -    # TODO: once this is well-documented, add deprecation notice if using old name. 
-    deployment_type: "{{ openshift_deployment_type | default(deployment_type) | default | string }}" -    openshift_deployment_type: "{{ openshift_deployment_type | default(deployment_type) | default | string }}"      deployment_subtype: "{{ openshift_deployment_subtype | default(deployment_subtype) | default('basic') | string }}"      openshift_deployment_subtype: "{{ openshift_deployment_subtype | default(deployment_subtype) | default('basic') | string }}" -- name: Abort when deployment type is invalid -  # this variable is required; complain early and clearly if it is invalid. -  when: openshift_deployment_type not in known_openshift_deployment_types -  fail: -    msg: |- -      Please set openshift_deployment_type to one of: -      {{ known_openshift_deployment_types | join(', ') }} -  - name: Normalize openshift_release    set_fact:      # Normalize release if provided, e.g. "v3.5" => "3.5" diff --git a/roles/openshift_sanitize_inventory/vars/main.yml b/roles/openshift_sanitize_inventory/vars/main.yml index 0fc2372d2..df15948d2 100644 --- a/roles/openshift_sanitize_inventory/vars/main.yml +++ b/roles/openshift_sanitize_inventory/vars/main.yml @@ -1,7 +1,4 @@  --- -# origin uses community packages named 'origin' -# openshift-enterprise uses Red Hat packages named 'atomic-openshift' -known_openshift_deployment_types: ['origin', 'openshift-enterprise']  __deprecation_header: "[DEPRECATION WARNING]:" diff --git a/roles/openshift_service_catalog/tasks/generate_certs.yml b/roles/openshift_service_catalog/tasks/generate_certs.yml index cd7bda2c6..e478023f8 100644 --- a/roles/openshift_service_catalog/tasks/generate_certs.yml +++ b/roles/openshift_service_catalog/tasks/generate_certs.yml @@ -12,7 +12,7 @@  - name: Generate signing cert    command: > -    {{ openshift.common.client_binary }} adm --config=/etc/origin/master/admin.kubeconfig ca create-signer-cert +    {{ openshift_client_binary }} adm --config=/etc/origin/master/admin.kubeconfig ca create-signer-cert    
  --key={{ generated_certs_dir }}/ca.key --cert={{ generated_certs_dir }}/ca.crt      --serial={{ generated_certs_dir }}/apiserver.serial.txt --name=service-catalog-signer @@ -60,7 +60,7 @@    register: apiserver_ca  - shell: > -    {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig get apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found" +    {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig get apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found"    register: get_apiservices    changed_when: no diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml index 41a6691c9..cfecaa12c 100644 --- a/roles/openshift_service_catalog/tasks/install.yml +++ b/roles/openshift_service_catalog/tasks/install.yml @@ -6,10 +6,10 @@    register: mktemp    changed_when: False -- name: Set default image variables based on deployment_type +- name: Set default image variables based on openshift_deployment_type    include_vars: "{{ item }}"    with_first_found: -    - "{{ openshift_deployment_type | default(deployment_type) }}.yml" +    - "{{ openshift_deployment_type }}.yml"      - "default_images.yml"  - name: Set service_catalog image facts @@ -38,7 +38,7 @@      - name: Make kube-service-catalog project network global        command: > -        {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig adm pod-network make-projects-global kube-service-catalog +        {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig adm pod-network make-projects-global kube-service-catalog  - include_tasks: generate_certs.yml @@ -88,14 +88,14 @@    vars:      original_content: "{{ edit_yaml.results.results[0] | to_yaml }}"    when: -    - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], 
['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) +    - not edit_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not edit_yaml.results.results[0] | lib_utils_oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])  # only do this if we don't already have the updated role info  - name: update edit role for service catalog and pod preset access    command: > -    {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/edit_sc_patch.yml +    {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/edit_sc_patch.yml    when: -    - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) +    - not edit_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not edit_yaml.results.results[0] | lib_utils_oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])  - oc_obj:      name: admin @@ -111,14 +111,14 @@    vars:      original_content: "{{ admin_yaml.results.results[0] | to_yaml }}"    when: -    - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], 
['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) +    - not admin_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not admin_yaml.results.results[0] | lib_utils_oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])  # only do this if we don't already have the updated role info  - name: update admin role for service catalog and pod preset access    command: > -    {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/admin_sc_patch.yml +    {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/admin_sc_patch.yml    when: -    - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch']) +    - not admin_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch', 'patch']) or not admin_yaml.results.results[0] | lib_utils_oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])  - oc_obj:      name: view @@ -134,14 +134,14 @@    vars:      original_content: "{{ view_yaml.results.results[0] | to_yaml }}"    when: -    - not view_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['get', 'list', 'watch']) +    - 
not view_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['get', 'list', 'watch'])  # only do this if we don't already have the updated role info  - name: update view role for service catalog access    command: > -    {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/view_sc_patch.yml +    {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig replace -f {{ mktemp.stdout }}/view_sc_patch.yml    when: -    - not view_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['get', 'list', 'watch']) +    - not view_yaml.results.results[0] | lib_utils_oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['get', 'list', 'watch'])  - oc_adm_policy_user:      namespace: kube-service-catalog diff --git a/roles/openshift_service_catalog/tasks/remove.yml b/roles/openshift_service_catalog/tasks/remove.yml index a832e1f85..aa32d0513 100644 --- a/roles/openshift_service_catalog/tasks/remove.yml +++ b/roles/openshift_service_catalog/tasks/remove.yml @@ -1,7 +1,7 @@  ---  - name: Remove Service Catalog APIServer    command: > -    {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig delete apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io --ignore-not-found -n kube-service-catalog +    {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig delete apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io --ignore-not-found -n kube-service-catalog  # TODO: this module doesn't currently remove this  #- name: Remove service catalog api service @@ -48,7 +48,7 @@  - name: Remove Service Catalog kube-system Role Bindinds    shell: > -    {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig process kube-system-service-catalog-role-bindings -n 
kube-system | {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig delete --ignore-not-found -f - +    {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig process kube-system-service-catalog-role-bindings -n kube-system | {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig delete --ignore-not-found -f -  - oc_obj:      kind: template @@ -58,7 +58,7 @@  - name: Remove Service Catalog kube-service-catalog Role Bindinds    shell: > -    {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig process service-catalog-role-bindings -n kube-service-catalog | {{ openshift.common.client_binary }} --config=/etc/origin/master/admin.kubeconfig delete --ignore-not-found -f - +    {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig process service-catalog-role-bindings -n kube-service-catalog | {{ openshift_client_binary }} --config=/etc/origin/master/admin.kubeconfig delete --ignore-not-found -f -  - oc_obj:      kind: template diff --git a/roles/openshift_service_catalog/tasks/start_api_server.yml b/roles/openshift_service_catalog/tasks/start_api_server.yml index b143292b6..84e542eaf 100644 --- a/roles/openshift_service_catalog/tasks/start_api_server.yml +++ b/roles/openshift_service_catalog/tasks/start_api_server.yml @@ -5,7 +5,7 @@      name: "{{ openshift.node.nodename }}"      kind: node      state: add -    labels: "{{ openshift_service_catalog_nodeselector | default ({'openshift-infra': 'apiserver'}) | oo_dict_to_list_of_dict }}" +    labels: "{{ openshift_service_catalog_nodeselector | default ({'openshift-infra': 'apiserver'}) | lib_utils_oo_dict_to_list_of_dict }}"  # wait to see that the apiserver is available  - name: wait for api server to be ready diff --git a/roles/openshift_storage_glusterfs/defaults/main.yml b/roles/openshift_storage_glusterfs/defaults/main.yml index da34fab2a..4cbe262d2 100644 --- 
a/roles/openshift_storage_glusterfs/defaults/main.yml +++ b/roles/openshift_storage_glusterfs/defaults/main.yml @@ -6,16 +6,16 @@ openshift_storage_glusterfs_nodeselector: "glusterfs={{ openshift_storage_gluste  openshift_storage_glusterfs_use_default_selector: False  openshift_storage_glusterfs_storageclass: True  openshift_storage_glusterfs_storageclass_default: False -openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}" +openshift_storage_glusterfs_image: "{{ 'rhgs3/rhgs-server-rhel7' | quote if openshift_deployment_type == 'openshift-enterprise' else 'gluster/gluster-centos' | quote }}"  openshift_storage_glusterfs_version: 'latest'  openshift_storage_glusterfs_block_deploy: True -openshift_storage_glusterfs_block_image: "{{ 'rhgs3/rhgs-gluster-block-prov-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/glusterblock-provisioner' | quote }}" +openshift_storage_glusterfs_block_image: "{{ 'rhgs3/rhgs-gluster-block-prov-rhel7' | quote if openshift_deployment_type == 'openshift-enterprise' else 'gluster/glusterblock-provisioner' | quote }}"  openshift_storage_glusterfs_block_version: 'latest'  openshift_storage_glusterfs_block_host_vol_create: True  openshift_storage_glusterfs_block_host_vol_size: 100  openshift_storage_glusterfs_block_host_vol_max: 15  openshift_storage_glusterfs_s3_deploy: True -openshift_storage_glusterfs_s3_image: "{{ 'rhgs3/rhgs-gluster-s3-server-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'gluster/gluster-object' | quote }}" +openshift_storage_glusterfs_s3_image: "{{ 'rhgs3/rhgs-gluster-s3-server-rhel7' | quote if openshift_deployment_type == 'openshift-enterprise' else 'gluster/gluster-object' | quote }}"  openshift_storage_glusterfs_s3_version: 'latest'  openshift_storage_glusterfs_s3_account: "{{ omit }}"  openshift_storage_glusterfs_s3_user: "{{ omit }}" @@ -29,7 +29,7 @@ 
openshift_storage_glusterfs_heketi_is_native: "{{ openshift_storage_glusterfs_is  openshift_storage_glusterfs_heketi_is_missing: True  openshift_storage_glusterfs_heketi_deploy_is_missing: True  openshift_storage_glusterfs_heketi_cli: 'heketi-cli' -openshift_storage_glusterfs_heketi_image: "{{ 'rhgs3/rhgs-volmanager-rhel7' | quote if deployment_type == 'openshift-enterprise' else 'heketi/heketi' | quote }}" +openshift_storage_glusterfs_heketi_image: "{{ 'rhgs3/rhgs-volmanager-rhel7' | quote if openshift_deployment_type == 'openshift-enterprise' else 'heketi/heketi' | quote }}"  openshift_storage_glusterfs_heketi_version: 'latest'  openshift_storage_glusterfs_heketi_admin_key: "{{ omit }}"  openshift_storage_glusterfs_heketi_user_key: "{{ omit }}" diff --git a/roles/openshift_storage_glusterfs/meta/main.yml b/roles/openshift_storage_glusterfs/meta/main.yml index 6a4ef942b..aa20245d5 100644 --- a/roles/openshift_storage_glusterfs/meta/main.yml +++ b/roles/openshift_storage_glusterfs/meta/main.yml @@ -12,4 +12,4 @@ galaxy_info:  dependencies:  - role: openshift_facts  - role: lib_openshift -- role: lib_os_firewall +- role: lib_utils diff --git a/roles/openshift_storage_glusterfs/tasks/gluster_s3_deploy.yml b/roles/openshift_storage_glusterfs/tasks/gluster_s3_deploy.yml index 1664ecc1e..5b4c16740 100644 --- a/roles/openshift_storage_glusterfs/tasks/gluster_s3_deploy.yml +++ b/roles/openshift_storage_glusterfs/tasks/gluster_s3_deploy.yml @@ -63,7 +63,7 @@    until:    - "gluster_s3_pvcs.results.results[0]['items'] | count > 0"    # Pod's 'Bound' status must be True -  - "gluster_s3_pvcs.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Bound'}) | map('bool') | select | list | count == 2" +  - "gluster_s3_pvcs.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Bound'}) | map('bool') | select | list | count == 
2"    delay: 10    retries: "{{ (glusterfs_timeout | int / 10) | int }}" @@ -108,6 +108,6 @@    until:    - "gluster_s3_pod.results.results[0]['items'] | count > 0"    # Pod's 'Ready' status must be True -  - "gluster_s3_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1" +  - "gluster_s3_pod.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"    delay: 10    retries: "{{ (glusterfs_timeout | int / 10) | int }}" diff --git a/roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml index d6be8c726..e5dcdcab7 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterblock_deploy.yml @@ -61,6 +61,6 @@    until:    - "glusterblock_pod.results.results[0]['items'] | count > 0"    # Pod's 'Ready' status must be True -  - "glusterblock_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1" +  - "glusterblock_pod.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"    delay: 10    retries: "{{ (glusterfs_timeout | int / 10) | int }}" diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml index d11023a39..001578406 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_common.yml @@ -2,7 +2,7 @@  - name: Make sure heketi-client is installed    
package: name=heketi-client state=present    when: -  - not openshift.common.is_atomic | bool +  - not openshift_is_atomic | bool    - not glusterfs_heketi_is_native | bool    register: result    until: result is succeeded @@ -126,7 +126,7 @@    - "glusterfs_heketi_is_native"    - "deploy_heketi_pod.results.results[0]['items'] | count > 0"    # deploy-heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True -  - "deploy_heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0" +  - "deploy_heketi_pod.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"  - name: Check for existing heketi pod    oc_obj: @@ -144,7 +144,7 @@    - "glusterfs_heketi_is_native"    - "heketi_pod.results.results[0]['items'] | count > 0"    # heketi is not missing when there are one or more pods with matching labels whose 'Ready' status is True -  - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0" +  - "heketi_pod.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count > 0"  - name: Generate topology file    template: @@ -177,14 +177,14 @@  - name: Generate heketi admin key    set_fact: -    glusterfs_heketi_admin_key: "{{ 32 | oo_generate_secret }}" +    glusterfs_heketi_admin_key: "{{ 32 | lib_utils_oo_generate_secret }}"    when:    - glusterfs_heketi_is_native    - glusterfs_heketi_admin_key is undefined  - name: Generate heketi user key    set_fact: -    glusterfs_heketi_user_key: "{{ 32 | oo_generate_secret 
}}" +    glusterfs_heketi_user_key: "{{ 32 | lib_utils_oo_generate_secret }}"    until: "glusterfs_heketi_user_key != glusterfs_heketi_admin_key"    delay: 1    retries: 10 @@ -228,7 +228,7 @@    until:    - "deploy_heketi_pod.results.results[0]['items'] | count > 0"    # Pod's 'Ready' status must be True -  - "deploy_heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1" +  - "deploy_heketi_pod.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"    delay: 10    retries: "{{ (glusterfs_timeout | int / 10) | int }}"    when: @@ -238,14 +238,14 @@  - name: Set heketi-cli command    set_fact: -    glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {%if not glusterfs_heketi_is_missing %}{{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% else %}{{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% endif %} {% endif %}{{ glusterfs_heketi_cli }} -s http://{% if glusterfs_heketi_is_native %}localhost:8080{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %} --user admin {% if glusterfs_heketi_admin_key is defined %}--secret '{{ glusterfs_heketi_admin_key }}'{% endif %}" +    glusterfs_heketi_client: "{% if glusterfs_heketi_is_native %}{{ openshift_client_binary }} rsh --namespace={{ glusterfs_namespace }} {%if not glusterfs_heketi_is_missing %}{{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% else %}{{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% endif %} {% endif %}{{ glusterfs_heketi_cli }} -s http://{% if glusterfs_heketi_is_native %}localhost:8080{% else %}{{ glusterfs_heketi_url }}:{{ 
glusterfs_heketi_port }}{% endif %} --user admin {% if glusterfs_heketi_admin_key is defined %}--secret '{{ glusterfs_heketi_admin_key }}'{% endif %}"  - name: Verify heketi service    command: "{{ glusterfs_heketi_client }} cluster list"    changed_when: False  - name: Place heketi topology on heketi Pod -  shell: "{{ openshift.common.client_binary }} exec --namespace={{ glusterfs_namespace }} -i {%if not glusterfs_heketi_is_missing %}{{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% else %}{{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% endif %} -- bash -c 'mkdir -p {{ mktemp.stdout }} && cat > {{ mktemp.stdout }}/topology.json' < {{ mktemp.stdout }}/topology.json" +  shell: "{{ openshift_client_binary }} exec --namespace={{ glusterfs_namespace }} -i {%if not glusterfs_heketi_is_missing %}{{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% else %}{{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }}{% endif %} -- bash -c 'mkdir -p {{ mktemp.stdout }} && cat > {{ mktemp.stdout }}/topology.json' < {{ mktemp.stdout }}/topology.json"    when:    - glusterfs_heketi_is_native diff --git a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml index 0c2fcb2c5..4cc82f1ad 100644 --- a/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml +++ b/roles/openshift_storage_glusterfs/tasks/glusterfs_deploy.yml @@ -21,7 +21,7 @@      name: "{{ hostvars[item].openshift.node.nodename }}"      kind: node      state: absent -    labels: "{{ glusterfs_nodeselector | oo_dict_to_list_of_dict }}" +    labels: "{{ glusterfs_nodeselector | lib_utils_oo_dict_to_list_of_dict }}"    with_items: "{{ groups.all }}"    when: "'openshift' in hostvars[item] and glusterfs_wipe" @@ -60,7 +60,7 @@      name: "{{ hostvars[item].openshift.node.nodename }}"      kind: node      state: add -    labels: "{{ glusterfs_nodeselector | 
oo_dict_to_list_of_dict }}" +    labels: "{{ glusterfs_nodeselector | lib_utils_oo_dict_to_list_of_dict }}"    with_items: "{{ glusterfs_nodes | default([]) }}"  - name: Copy GlusterFS DaemonSet template @@ -109,6 +109,6 @@    until:    - "glusterfs_pods.results.results[0]['items'] | count > 0"    # There must be as many pods with 'Ready' staus  True as there are nodes expecting those pods -  - "glusterfs_pods.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == glusterfs_nodes | count" +  - "glusterfs_pods.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == glusterfs_nodes | count"    delay: 10    retries: "{{ (glusterfs_timeout | int / 10) | int }}" diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml index d23bd42b9..c0a8c53de 100644 --- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml +++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml @@ -4,7 +4,7 @@    register: setup_storage  - name: Copy heketi-storage list -  shell: "{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} cat /tmp/heketi-storage.json > {{ mktemp.stdout }}/heketi-storage.json" +  shell: "{{ openshift_client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ deploy_heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} cat /tmp/heketi-storage.json > {{ mktemp.stdout }}/heketi-storage.json"  # This is used in the subsequent task  - name: Copy the admin client config @@ -15,7 +15,7 @@  # Need `command` here because heketi-storage.json contains multiple objects.  
- name: Copy heketi DB to GlusterFS volume -  command: "{{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ glusterfs_namespace }}" +  command: "{{ openshift_client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create -f {{ mktemp.stdout }}/heketi-storage.json -n {{ glusterfs_namespace }}"    when: setup_storage.rc == 0  - name: Wait for copy job to finish @@ -28,14 +28,14 @@    until:    - "'results' in heketi_job.results and heketi_job.results.results | count > 0"    # Pod's 'Complete' status must be True -  - "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Complete'}) | map('bool') | select | list | count == 1" +  - "heketi_job.results.results | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Complete'}) | map('bool') | select | list | count == 1"    delay: 10    retries: "{{ (glusterfs_timeout | int / 10) | int }}"    failed_when:    - "'results' in heketi_job.results"    - "heketi_job.results.results | count > 0"    # Fail when pod's 'Failed' status is True -  - "heketi_job.results.results | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Failed'}) | map('bool') | select | list | count == 1" +  - "heketi_job.results.results | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Failed'}) | map('bool') | select | list | count == 1"    when: setup_storage.rc == 0  - name: Delete deploy resources @@ -120,13 +120,13 @@    until:    - "heketi_pod.results.results[0]['items'] | count > 0"    # Pod's 'Ready' status must be True -  - "heketi_pod.results.results[0]['items'] | oo_collect(attribute='status.conditions') | oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1" +  - 
"heketi_pod.results.results[0]['items'] | lib_utils_oo_collect(attribute='status.conditions') | lib_utils_oo_collect(attribute='status', filters={'type': 'Ready'}) | map('bool') | select | list | count == 1"    delay: 10    retries: "{{ (glusterfs_timeout | int / 10) | int }}"  - name: Set heketi-cli command    set_fact: -    glusterfs_heketi_client: "{{ openshift.common.client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {{ glusterfs_heketi_cli }} -s http://localhost:8080 --user admin --secret '{{ glusterfs_heketi_admin_key }}'" +    glusterfs_heketi_client: "{{ openshift_client_binary }} rsh --namespace={{ glusterfs_namespace }} {{ heketi_pod.results.results[0]['items'][0]['metadata']['name'] }} {{ glusterfs_heketi_cli }} -s http://localhost:8080 --user admin --secret '{{ glusterfs_heketi_admin_key }}'"  - name: Verify heketi service    command: "{{ glusterfs_heketi_client }} cluster list" diff --git a/roles/openshift_storage_nfs/meta/main.yml b/roles/openshift_storage_nfs/meta/main.yml index d61e6873a..3ae04e59f 100644 --- a/roles/openshift_storage_nfs/meta/main.yml +++ b/roles/openshift_storage_nfs/meta/main.yml @@ -10,5 +10,5 @@ galaxy_info:      versions:      - 7  dependencies: -- role: lib_os_firewall +- role: lib_utils  - role: openshift_facts diff --git a/roles/openshift_storage_nfs_lvm/meta/main.yml b/roles/openshift_storage_nfs_lvm/meta/main.yml index 50d94f6a3..de47708a5 100644 --- a/roles/openshift_storage_nfs_lvm/meta/main.yml +++ b/roles/openshift_storage_nfs_lvm/meta/main.yml @@ -16,3 +16,4 @@ galaxy_info:    - openshift  dependencies:  - role: openshift_facts +- role: lib_utils diff --git a/roles/openshift_storage_nfs_lvm/tasks/main.yml b/roles/openshift_storage_nfs_lvm/tasks/main.yml index c8e7b6d7c..ff92e59e5 100644 --- a/roles/openshift_storage_nfs_lvm/tasks/main.yml +++ b/roles/openshift_storage_nfs_lvm/tasks/main.yml @@ -2,7 +2,7 @@  # TODO -- this may actually work on 
atomic hosts  - fail:      msg: "openshift_storage_nfs_lvm is not compatible with atomic host" -  when: openshift.common.is_atomic | bool +  when: openshift_is_atomic | bool  - name: Create lvm volumes    lvol: vg={{osnl_volume_group}} lv={{ item }} size={{osnl_volume_size}}G diff --git a/roles/openshift_storage_nfs_lvm/tasks/nfs.yml b/roles/openshift_storage_nfs_lvm/tasks/nfs.yml index 94dc63bd2..9a72adbdc 100644 --- a/roles/openshift_storage_nfs_lvm/tasks/nfs.yml +++ b/roles/openshift_storage_nfs_lvm/tasks/nfs.yml @@ -1,7 +1,7 @@  ---  - name: Install NFS server    package: name=nfs-utils state=present -  when: not openshift.common.is_containerized | bool +  when: not openshift_is_containerized | bool    register: result    until: result is succeeded diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml index e50d5371e..97e58ffac 100644 --- a/roles/openshift_version/tasks/main.yml +++ b/roles/openshift_version/tasks/main.yml @@ -1,10 +1,6 @@  ---  # Determine the openshift_version to configure if none has been specified or set previously. -- set_fact: -    is_containerized: "{{ openshift.common.is_containerized | default(False) | bool }}" -    is_atomic: "{{ openshift.common.is_atomic | default(False) | bool }}" -  # Block attempts to install origin without specifying some kind of version information.  # This is because the latest tags for origin are usually alpha builds, which should not  # be used by default. Users must indicate what they want. @@ -16,7 +12,7 @@        component images to use. You may want the latest (usually alpha) releases or        a more stable release. (Suggestion: add openshift_release="x.y" to inventory.)    
when: -  - is_containerized | bool +  - openshift_is_containerized | bool    - openshift.common.deployment_type == 'origin'    - openshift_release is not defined    - openshift_image_tag is not defined @@ -94,11 +90,11 @@    block:    - name: Set openshift_version for rpm installation      include_tasks: set_version_rpm.yml -    when: not is_containerized | bool +    when: not openshift_is_containerized | bool    - name: Set openshift_version for containerized installation      include_tasks: set_version_containerized.yml -    when: is_containerized | bool +    when: openshift_is_containerized | bool    - block:      - name: Get available {{ openshift_service_type}} version @@ -121,8 +117,8 @@        - openshift_pkg_version is not defined        - openshift_image_tag is not defined      when: -    - is_containerized | bool -    - not is_atomic | bool +    - openshift_is_containerized | bool +    - not openshift_is_atomic | bool    # Warn if the user has provided an openshift_image_tag but is not doing a containerized install    # NOTE: This will need to be modified/removed for future container + rpm installations work. @@ -132,7 +128,7 @@          openshift_image_tag is used for containerized installs. If you are trying to          specify an image for a non-container install see oreg_url or oreg_url_master or oreg_url_node.      when: -    - not is_containerized | bool +    - not openshift_is_containerized | bool      - openshift_image_tag is defined    # At this point we know openshift_version is set appropriately. Now we set @@ -182,14 +178,14 @@        msg: "No OpenShift version available; please ensure your systems are fully registered and have access to appropriate yum repositories."      
name: Abort if openshift_pkg_version was not set      when: -    - not is_containerized | bool +    - not openshift_is_containerized | bool      - openshift_version == '0.0'    # We can't map an openshift_release to full rpm version like we can with containers; make sure    # the rpm version we looked up matches the release requested and error out if not.    - name: For an RPM install, abort when the release requested does not match the available version.      when: -    - not is_containerized | bool +    - not openshift_is_containerized | bool      - openshift_release is defined      assert:        that: diff --git a/roles/openshift_version/tasks/set_version_containerized.yml b/roles/openshift_version/tasks/set_version_containerized.yml index 71f957b78..e02a75eab 100644 --- a/roles/openshift_version/tasks/set_version_containerized.yml +++ b/roles/openshift_version/tasks/set_version_containerized.yml @@ -62,4 +62,4 @@  # dangly +c0mm1t-offset tags in the version. See also,  # openshift_facts.py  - set_fact: -    openshift_version: "{{ openshift_version | oo_chomp_commit_offset }}" +    openshift_version: "{{ openshift_version | lib_utils_oo_chomp_commit_offset }}" diff --git a/roles/template_service_broker/meta/main.yml b/roles/template_service_broker/meta/main.yml index ab5a0cf08..f1b56b771 100644 --- a/roles/template_service_broker/meta/main.yml +++ b/roles/template_service_broker/meta/main.yml @@ -11,3 +11,5 @@ galaxy_info:      - 7    categories:    - cloud +dependencies: +- role: lib_utils diff --git a/roles/template_service_broker/tasks/install.yml b/roles/template_service_broker/tasks/install.yml index 1253c1133..765263db5 100644 --- a/roles/template_service_broker/tasks/install.yml +++ b/roles/template_service_broker/tasks/install.yml @@ -1,9 +1,9 @@  ---  # Fact setting -- name: Set default image variables based on deployment type +- name: Set default image variables based on openshift_deployment_type    include_vars: "{{ item }}"    with_first_found: -    
- "{{ openshift_deployment_type | default(deployment_type) }}.yml" +    - "{{ openshift_deployment_type }}.yml"      - "default_images.yml"  - name: set template_service_broker facts @@ -44,16 +44,16 @@  - name: Apply template file    shell: > -    {{ openshift.common.client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" +    {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}"      --param API_SERVER_CONFIG="{{ config['content'] | b64decode }}"      --param IMAGE="{{ template_service_broker_prefix }}{{ template_service_broker_image_name }}:{{ template_service_broker_version }}"      --param NODE_SELECTOR={{ template_service_broker_selector | to_json | quote }} -    | {{ openshift.common.client_binary }} apply -f - +    | {{ openshift_client_binary }} apply -f -  # reconcile with rbac  - name: Reconcile with RBAC file    shell: > -    {{ openshift.common.client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_rbac_file }}" | {{ openshift.common.client_binary }} auth reconcile -f - +    {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_rbac_file }}" | {{ openshift_client_binary }} auth reconcile -f -  # Check that the TSB is running  - name: Verify that TSB is running @@ -80,7 +80,7 @@  # Register with broker  - name: Register TSB with broker    shell: > -    {{ openshift.common.client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" --param CA_BUNDLE="{{ __ca_bundle.content }}" | {{ openshift.common.client_binary }} apply -f - +    {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" --param CA_BUNDLE="{{ __ca_bundle.content }}" | {{ openshift_client_binary }} apply -f -  - file:      state: absent diff --git a/roles/template_service_broker/tasks/remove.yml b/roles/template_service_broker/tasks/remove.yml index 8b5593ff9..8b4d798db 100644 --- a/roles/template_service_broker/tasks/remove.yml +++ 
b/roles/template_service_broker/tasks/remove.yml @@ -13,11 +13,11 @@  - name: Delete TSB broker    shell: > -    {{ openshift.common.client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" | {{ openshift.common.client_binary }} delete --ignore-not-found -f - +    {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" | {{ openshift_client_binary }} delete --ignore-not-found -f -  - name: Delete TSB objects    shell: > -    {{ openshift.common.client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" | {{ openshift.common.client_binary }} delete --ignore-not-found -f - +    {{ openshift_client_binary }} process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" | {{ openshift_client_binary }} delete --ignore-not-found -f -  - name: empty out tech preview extension file for service console UI    copy: diff --git a/roles/tuned/tasks/main.yml b/roles/tuned/tasks/main.yml index e95d274d5..4a28d47b2 100644 --- a/roles/tuned/tasks/main.yml +++ b/roles/tuned/tasks/main.yml @@ -11,7 +11,7 @@    block:    - name: Set tuned OpenShift variables      set_fact: -      openshift_tuned_guest_profile: "{{ 'atomic-guest' if openshift.common.is_atomic else 'virtual-guest' }}" +      openshift_tuned_guest_profile: "{{ 'atomic-guest' if openshift_is_atomic else 'virtual-guest' }}"    - name: Ensure directory structure exists      file:  | 
