-rw-r--r--  .redhat-ci.inventory | 1
-rw-r--r--  .tito/packages/openshift-ansible | 2
-rw-r--r--  BUILD.md | 10
-rw-r--r--  README_CONTAINER_IMAGE.md | 20
-rw-r--r--  examples/certificate-check-upload.yaml | 6
-rw-r--r--  examples/certificate-check-volume.yaml | 6
-rw-r--r--  examples/scheduled-certcheck-upload.yaml | 2
-rw-r--r--  examples/scheduled-certcheck-volume.yaml | 2
-rwxr-xr-x  hack/build-images.sh | 4
-rwxr-xr-x  hack/push-release.sh | 2
-rw-r--r--  images/installer/Dockerfile | 8
-rw-r--r--  images/installer/Dockerfile.rhel7 | 2
-rw-r--r--  inventory/byo/hosts.origin.example | 48
-rw-r--r--  inventory/byo/hosts.ose.example | 49
-rw-r--r--  openshift-ansible.spec | 255
-rw-r--r--  playbooks/adhoc/uninstall.yml | 12
-rw-r--r--  playbooks/byo/openshift-checks/README.md | 65
-rw-r--r--  playbooks/byo/openshift-checks/certificate_expiry/default.yaml (renamed from playbooks/certificate_expiry/default.yaml) | 0
-rw-r--r--  playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml (renamed from playbooks/certificate_expiry/easy-mode-upload.yaml) | 0
-rw-r--r--  playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml (renamed from playbooks/certificate_expiry/easy-mode.yaml) | 0
-rw-r--r--  playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml (renamed from playbooks/certificate_expiry/html_and_json_default_paths.yaml) | 0
-rw-r--r--  playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml (renamed from playbooks/certificate_expiry/html_and_json_timestamp.yaml) | 0
-rw-r--r--  playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml (renamed from playbooks/certificate_expiry/longer-warning-period-json-results.yaml) | 0
-rw-r--r--  playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml (renamed from playbooks/certificate_expiry/longer_warning_period.yaml) | 0
l---------  playbooks/byo/openshift-checks/certificate_expiry/roles | 1
-rw-r--r--  playbooks/byo/openshift-checks/health.yml | 3
-rw-r--r--  playbooks/byo/openshift-checks/pre-install.yml | 3
-rw-r--r--  playbooks/byo/openshift-cluster/config.yml | 13
-rw-r--r--  playbooks/byo/openshift-cluster/redeploy-certificates.yml | 2
-rw-r--r--  playbooks/byo/openshift-preflight/README.md | 43
-rw-r--r--  playbooks/byo/openshift-preflight/check.yml | 16
l---------  playbooks/certificate_expiry | 1
l---------  playbooks/certificate_expiry/roles | 1
-rw-r--r--  playbooks/common/openshift-checks/health.yml | 11
-rw-r--r--  playbooks/common/openshift-checks/pre-install.yml | 11
l---------  playbooks/common/openshift-checks/roles (renamed from playbooks/byo/openshift-preflight/roles) | 0
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/ca.yml | 37
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/post_control_plane.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml | 33
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml | 70
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 8
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml | 10
-rw-r--r--  playbooks/common/openshift-cluster/validate_hostnames.yml | 4
-rw-r--r--  playbooks/common/openshift-master/config.yml | 1
-rw-r--r--  playbooks/common/openshift-node/config.yml | 2
-rw-r--r--  playbooks/libvirt/openshift-cluster/config.yml | 2
-rw-r--r--  requirements.txt | 1
-rw-r--r--  roles/calico/defaults/main.yaml | 5
-rw-r--r--  roles/calico/tasks/gen_certs.yml | 17
-rw-r--r--  roles/calico/tasks/main.yml | 39
-rw-r--r--  roles/calico/templates/10-calico.conf.j2 (renamed from roles/calico/templates/calicoctl.conf.j2) | 2
-rw-r--r--  roles/calico/templates/calico.service.j2 | 4
-rw-r--r--  roles/calico/templates/calicoctl.cfg.j2 (renamed from roles/calico/templates/10-calico.cfg.j2) | 2
-rw-r--r--  roles/calico_master/templates/calico-policy-controller.yml.j2 | 6
-rw-r--r--  roles/docker/README.md | 2
-rw-r--r--  roles/docker/tasks/package_docker.yml | 15
-rw-r--r--  roles/docker/tasks/systemcontainer_docker.yml | 16
-rw-r--r--  roles/lib_openshift/library/oc_adm_ca_server_cert.py | 27
-rw-r--r--  roles/lib_openshift/library/oc_adm_manage_node.py | 27
-rw-r--r--  roles/lib_openshift/library/oc_adm_policy_group.py | 27
-rw-r--r--  roles/lib_openshift/library/oc_adm_policy_user.py | 27
-rw-r--r--  roles/lib_openshift/library/oc_adm_registry.py | 97
-rw-r--r--  roles/lib_openshift/library/oc_adm_router.py | 97
-rw-r--r--  roles/lib_openshift/library/oc_atomic_container.py | 8
-rw-r--r--  roles/lib_openshift/library/oc_clusterrole.py | 27
-rw-r--r--  roles/lib_openshift/library/oc_configmap.py | 27
-rw-r--r--  roles/lib_openshift/library/oc_edit.py | 27
-rw-r--r--  roles/lib_openshift/library/oc_env.py | 27
-rw-r--r--  roles/lib_openshift/library/oc_group.py | 27
-rw-r--r--  roles/lib_openshift/library/oc_image.py | 27
-rw-r--r--  roles/lib_openshift/library/oc_label.py | 27
-rw-r--r--  roles/lib_openshift/library/oc_obj.py | 27
-rw-r--r--  roles/lib_openshift/library/oc_objectvalidator.py | 27
-rw-r--r--  roles/lib_openshift/library/oc_process.py | 29
-rw-r--r--  roles/lib_openshift/library/oc_project.py | 27
-rw-r--r--  roles/lib_openshift/library/oc_pvc.py | 27
-rw-r--r--  roles/lib_openshift/library/oc_route.py | 27
-rw-r--r--  roles/lib_openshift/library/oc_scale.py | 27
-rw-r--r--  roles/lib_openshift/library/oc_secret.py | 46
-rw-r--r--  roles/lib_openshift/library/oc_service.py | 101
-rw-r--r--  roles/lib_openshift/library/oc_serviceaccount.py | 27
-rw-r--r--  roles/lib_openshift/library/oc_serviceaccount_secret.py | 27
-rw-r--r--  roles/lib_openshift/library/oc_user.py | 27
-rw-r--r--  roles/lib_openshift/library/oc_version.py | 27
-rw-r--r--  roles/lib_openshift/library/oc_volume.py | 27
-rw-r--r--  roles/lib_openshift/src/ansible/oc_adm_registry.py | 2
-rw-r--r--  roles/lib_openshift/src/ansible/oc_adm_router.py | 2
-rw-r--r--  roles/lib_openshift/src/ansible/oc_atomic_container.py | 8
-rw-r--r--  roles/lib_openshift/src/ansible/oc_secret.py | 1
-rw-r--r--  roles/lib_openshift/src/ansible/oc_service.py | 1
-rw-r--r--  roles/lib_openshift/src/class/oc_adm_registry.py | 2
-rw-r--r--  roles/lib_openshift/src/class/oc_adm_router.py | 2
-rw-r--r--  roles/lib_openshift/src/class/oc_process.py | 2
-rw-r--r--  roles/lib_openshift/src/class/oc_secret.py | 7
-rw-r--r--  roles/lib_openshift/src/class/oc_service.py | 5
-rw-r--r--  roles/lib_openshift/src/doc/secret | 6
-rw-r--r--  roles/lib_openshift/src/doc/service | 7
-rw-r--r--  roles/lib_openshift/src/lib/base.py | 27
-rw-r--r--  roles/lib_openshift/src/lib/secret.py | 5
-rw-r--r--  roles/lib_openshift/src/lib/service.py | 61
-rwxr-xr-x  roles/lib_openshift/src/test/integration/oc_service.yml | 5
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_adm_registry.py | 3
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_adm_router.py | 3
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_secret.py | 3
-rwxr-xr-x  roles/lib_openshift/src/test/unit/test_oc_service.py | 182
-rw-r--r--  roles/nuage_master/defaults/main.yaml | 4
-rw-r--r--  roles/nuage_master/tasks/main.yaml | 8
-rw-r--r--  roles/nuage_master/templates/nuage-openshift-monitor.j2 | 10
-rw-r--r--  roles/nuage_node/tasks/main.yaml | 15
-rw-r--r--  roles/nuage_node/templates/vsp-openshift.j2 | 2
-rw-r--r--  roles/nuage_node/vars/main.yaml | 3
-rw-r--r--  roles/openshift_ca/tasks/main.yml | 32
-rw-r--r--  roles/openshift_ca/vars/main.yml | 3
-rw-r--r--  roles/openshift_certificate_expiry/README.md | 48
-rw-r--r--  roles/openshift_docker_facts/tasks/main.yml | 3
-rwxr-xr-x  roles/openshift_examples/examples-sync.sh | 11
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/cfme-templates/jboss-middleware-manager-pv-example.yaml | 58
-rw-r--r--  roles/openshift_examples/files/examples/v1.4/cfme-templates/jboss-middleware-manager-template.yaml | 254
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/cfme-templates/jboss-middleware-manager-pv-example.yaml | 58
-rw-r--r--  roles/openshift_examples/files/examples/v1.5/cfme-templates/jboss-middleware-manager-template.yaml | 254
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-centos7.json | 28
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-rhel7.json | 20
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/amp.yml | 1261
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/apicast-gateway-template.yml | 149
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/apicast.yml | 157
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql-persistent.json | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql.json | 3
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql-persistent.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb-persistent.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb.json | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/pvc.yml | 49
-rw-r--r--  roles/openshift_examples/files/examples/v3.6/quickstart-templates/wildcard.yml | 158
-rw-r--r--  roles/openshift_excluder/tasks/disable.yml | 6
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 12
-rw-r--r--  roles/openshift_health_checker/action_plugins/openshift_health_check.py | 26
-rw-r--r--  roles/openshift_health_checker/callback_plugins/zz_failure_summary.py | 87
-rwxr-xr-x  roles/openshift_health_checker/library/aos_version.py | 105
-rw-r--r--  roles/openshift_health_checker/library/ocutil.py | 74
-rw-r--r--  roles/openshift_health_checker/library/rpm_version.py | 127
-rw-r--r--  roles/openshift_health_checker/meta/main.yml | 1
-rw-r--r--  roles/openshift_health_checker/openshift_checks/__init__.py | 22
-rw-r--r--  roles/openshift_health_checker/openshift_checks/disk_availability.py | 6
-rw-r--r--  roles/openshift_health_checker/openshift_checks/docker_image_availability.py | 166
-rw-r--r--  roles/openshift_health_checker/openshift_checks/docker_storage.py | 185
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/__init__.py | 0
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/curator.py | 61
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py | 217
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/fluentd.py | 170
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/kibana.py | 229
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/logging.py | 96
-rw-r--r--  roles/openshift_health_checker/openshift_checks/memory_availability.py | 27
-rw-r--r--  roles/openshift_health_checker/openshift_checks/mixins.py | 42
-rw-r--r--  roles/openshift_health_checker/openshift_checks/ovs_version.py | 78
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_version.py | 108
-rw-r--r--  roles/openshift_health_checker/test/action_plugin_test.py | 16
-rw-r--r--  roles/openshift_health_checker/test/aos_version_test.py | 87
-rw-r--r--  roles/openshift_health_checker/test/curator_test.py | 68
-rw-r--r--  roles/openshift_health_checker/test/disk_availability_test.py | 36
-rw-r--r--  roles/openshift_health_checker/test/docker_image_availability_test.py | 188
-rw-r--r--  roles/openshift_health_checker/test/docker_storage_test.py | 224
-rw-r--r--  roles/openshift_health_checker/test/elasticsearch_test.py | 180
-rw-r--r--  roles/openshift_health_checker/test/fluentd_test.py | 109
-rw-r--r--  roles/openshift_health_checker/test/kibana_test.py | 218
-rw-r--r--  roles/openshift_health_checker/test/logging_check_test.py | 137
-rw-r--r--  roles/openshift_health_checker/test/memory_availability_test.py | 62
-rw-r--r--  roles/openshift_health_checker/test/ovs_version_test.py | 89
-rw-r--r--  roles/openshift_health_checker/test/package_version_test.py | 126
-rw-r--r--  roles/openshift_health_checker/test/rpm_version_test.py | 82
-rw-r--r--  roles/openshift_hosted/tasks/registry/storage/object_storage.yml | 16
-rw-r--r--  roles/openshift_hosted/tasks/registry/storage/s3.yml | 4
-rw-r--r--  roles/openshift_hosted/tasks/router/router.yml | 4
-rw-r--r--  roles/openshift_hosted/templates/registry_config.j2 | 4
-rw-r--r--  roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml | 4
-rw-r--r--  roles/openshift_logging/README.md | 31
-rw-r--r--  roles/openshift_logging/defaults/main.yml | 9
-rw-r--r--  roles/openshift_logging/files/logging-deployer-sa.yaml | 6
-rw-r--r--  roles/openshift_logging/filter_plugins/openshift_logging.py | 2
-rw-r--r--  roles/openshift_logging/library/openshift_logging_facts.py | 2
-rw-r--r--  roles/openshift_logging/tasks/delete_logging.yaml | 120
-rw-r--r--  roles/openshift_logging/tasks/generate_certs.yaml | 8
-rw-r--r--  roles/openshift_logging/tasks/generate_clusterrolebindings.yaml | 13
-rw-r--r--  roles/openshift_logging/tasks/generate_clusterroles.yaml | 11
-rw-r--r--  roles/openshift_logging/tasks/generate_configmaps.yaml | 178
-rw-r--r--  roles/openshift_logging/tasks/generate_deploymentconfigs.yaml | 65
-rw-r--r--  roles/openshift_logging/tasks/generate_pvcs.yaml | 47
-rw-r--r--  roles/openshift_logging/tasks/generate_rolebindings.yaml | 12
-rw-r--r--  roles/openshift_logging/tasks/generate_routes.yaml | 169
-rw-r--r--  roles/openshift_logging/tasks/generate_secrets.yaml | 129
-rw-r--r--  roles/openshift_logging/tasks/generate_serviceaccounts.yaml | 14
-rw-r--r--  roles/openshift_logging/tasks/generate_services.yaml | 119
-rw-r--r--  roles/openshift_logging/tasks/install_curator.yaml | 53
-rw-r--r--  roles/openshift_logging/tasks/install_elasticsearch.yaml | 118
-rw-r--r--  roles/openshift_logging/tasks/install_fluentd.yaml | 54
-rw-r--r--  roles/openshift_logging/tasks/install_kibana.yaml | 60
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml | 317
-rw-r--r--  roles/openshift_logging/tasks/install_mux.yaml | 67
-rw-r--r--  roles/openshift_logging/tasks/install_support.yaml | 73
-rw-r--r--  roles/openshift_logging/tasks/main.yaml | 21
-rw-r--r--  roles/openshift_logging/tasks/oc_apply.yaml | 52
-rw-r--r--  roles/openshift_logging/tasks/oc_secret.yaml | 7
-rw-r--r--  roles/openshift_logging/tasks/procure_server_certs.yaml | 2
-rw-r--r--  roles/openshift_logging/tasks/set_es_storage.yaml | 80
-rw-r--r--  roles/openshift_logging/tasks/start_cluster.yaml | 156
-rw-r--r--  roles/openshift_logging/tasks/stop_cluster.yaml | 153
-rw-r--r--  roles/openshift_logging/tasks/upgrade_logging.yaml | 48
-rw-r--r--  roles/openshift_logging/templates/clusterrole.j2 | 21
-rw-r--r--  roles/openshift_logging/templates/clusterrolebinding.j2 | 24
-rw-r--r--  roles/openshift_logging/templates/es-storage-emptydir.partial | 1
-rw-r--r--  roles/openshift_logging/templates/es-storage-hostpath.partial | 2
-rw-r--r--  roles/openshift_logging/templates/es-storage-pvc.partial | 2
-rw-r--r--  roles/openshift_logging/templates/fluentd.j2 | 167
-rw-r--r--  roles/openshift_logging/templates/secret.j2 | 9
-rw-r--r--  roles/openshift_logging/templates/service.j2 | 34
-rw-r--r--  roles/openshift_logging/templates/serviceaccount.j2 | 16
-rw-r--r--  roles/openshift_logging_curator/defaults/main.yml | 33
-rw-r--r--  roles/openshift_logging_curator/files/curator.yml (renamed from roles/openshift_logging/files/curator.yml) | 0
-rw-r--r--  roles/openshift_logging_curator/meta/main.yaml | 15
-rw-r--r--  roles/openshift_logging_curator/tasks/determine_version.yaml | 17
-rw-r--r--  roles/openshift_logging_curator/tasks/main.yaml | 113
-rw-r--r--  roles/openshift_logging_curator/templates/curator.j2 (renamed from roles/openshift_logging/templates/curator.j2) | 6
-rw-r--r--  roles/openshift_logging_curator/vars/main.yml | 3
-rw-r--r--  roles/openshift_logging_elasticsearch/defaults/main.yml | 57
-rw-r--r--  roles/openshift_logging_elasticsearch/files/es_migration.sh (renamed from roles/openshift_logging/files/es_migration.sh) | 0
-rw-r--r--  roles/openshift_logging_elasticsearch/files/rolebinding-reader.yml | 9
-rw-r--r--  roles/openshift_logging_elasticsearch/meta/main.yaml | 15
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/determine_version.yaml | 19
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/main.yaml | 278
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/elasticsearch-logging.yml.j2 (renamed from roles/openshift_logging/templates/elasticsearch-logging.yml.j2) | 33
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2 (renamed from roles/openshift_logging/templates/elasticsearch.yml.j2) | 5
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/es.j2 (renamed from roles/openshift_logging/templates/es.j2) | 33
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/pvc.j2 (renamed from roles/openshift_logging/templates/pvc.j2) | 2
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/rolebinding.j2 (renamed from roles/openshift_logging/templates/rolebinding.j2) | 0
-rw-r--r--  roles/openshift_logging_elasticsearch/vars/main.yml | 12
-rw-r--r--  roles/openshift_logging_fluentd/defaults/main.yml | 59
-rw-r--r--  roles/openshift_logging_fluentd/files/fluentd-throttle-config.yaml (renamed from roles/openshift_logging/files/fluentd-throttle-config.yaml) | 0
-rw-r--r--  roles/openshift_logging_fluentd/files/secure-forward.conf (renamed from roles/openshift_logging/files/secure-forward.conf) | 0
-rw-r--r--  roles/openshift_logging_fluentd/meta/main.yaml | 15
-rw-r--r--  roles/openshift_logging_fluentd/tasks/determine_version.yaml | 17
-rw-r--r--  roles/openshift_logging_fluentd/tasks/label_and_wait.yaml | 10
-rw-r--r--  roles/openshift_logging_fluentd/tasks/main.yaml | 206
-rw-r--r--  roles/openshift_logging_fluentd/templates/fluent.conf.j2 | 78
-rw-r--r--  roles/openshift_logging_fluentd/templates/fluentd.j2 | 123
-rw-r--r--  roles/openshift_logging_fluentd/vars/main.yml | 5
-rw-r--r--  roles/openshift_logging_kibana/defaults/main.yml | 41
-rw-r--r--  roles/openshift_logging_kibana/meta/main.yaml | 15
-rw-r--r--  roles/openshift_logging_kibana/tasks/determine_version.yaml | 17
-rw-r--r--  roles/openshift_logging_kibana/tasks/main.yaml | 256
-rw-r--r--  roles/openshift_logging_kibana/templates/kibana.j2 (renamed from roles/openshift_logging/templates/kibana.j2) | 63
-rw-r--r--  roles/openshift_logging_kibana/templates/oauth-client.j2 (renamed from roles/openshift_logging/templates/oauth-client.j2) | 7
-rw-r--r--  roles/openshift_logging_kibana/templates/route_reencrypt.j2 (renamed from roles/openshift_logging/templates/route_reencrypt.j2) | 0
-rw-r--r--  roles/openshift_logging_kibana/vars/main.yml | 3
-rw-r--r--  roles/openshift_logging_mux/defaults/main.yml | 48
-rw-r--r--  roles/openshift_logging_mux/files/fluent.conf (renamed from roles/openshift_logging/files/fluent.conf) | 0
-rw-r--r--  roles/openshift_logging_mux/files/secure-forward.conf | 24
-rw-r--r--  roles/openshift_logging_mux/meta/main.yaml | 15
-rw-r--r--  roles/openshift_logging_mux/tasks/determine_version.yaml | 17
-rw-r--r--  roles/openshift_logging_mux/tasks/main.yaml | 201
-rw-r--r--  roles/openshift_logging_mux/templates/mux.j2 (renamed from roles/openshift_logging/templates/mux.j2) | 32
-rw-r--r--  roles/openshift_logging_mux/vars/main.yml | 3
-rw-r--r--  roles/openshift_master/README.md | 21
-rw-r--r--  roles/openshift_master/tasks/main.yml | 1
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 7
-rw-r--r--  roles/openshift_master_certificates/tasks/main.yml | 6
-rw-r--r--  roles/openshift_master_facts/tasks/main.yml | 2
-rw-r--r--  roles/openshift_metrics/tasks/generate_certificates.yaml | 2
-rw-r--r--  roles/openshift_metrics/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_metrics/tasks/setup_certificate.yaml | 2
-rw-r--r--  roles/openshift_metrics/templates/hawkular_cassandra_rc.j2 | 1
-rw-r--r--  roles/openshift_metrics/templates/hawkular_metrics_rc.j2 | 1
-rw-r--r--  roles/openshift_metrics/templates/heapster.j2 | 1
-rw-r--r--  roles/openshift_node/README.md | 9
-rw-r--r--  roles/openshift_node/handlers/main.yml | 3
-rw-r--r--  roles/openshift_node/tasks/main.yml | 11
-rw-r--r--  roles/openshift_node_upgrade/tasks/main.yml | 4
-rw-r--r--  roles/openshift_node_upgrade/tasks/restart.yml | 8
-rw-r--r--  roles/openshift_node_upgrade/tasks/rpm_upgrade.yml | 15
l---------  roles/openshift_node_upgrade/templates/atomic-openshift-node.service.j2 | 1
l---------  roles/openshift_node_upgrade/templates/origin-node.service.j2 | 1
-rw-r--r--  roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py | 2
-rw-r--r--  roles/openshift_version/tasks/main.yml | 11
-rw-r--r--  test/integration/openshift_health_checker/builds/aos-package-checks/Dockerfile | 2
-rw-r--r--  test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.2.spec | 17
-rw-r--r--  test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.3.spec | 17
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml | 14
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_update_dep_missing.yml | 9
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_broken.yml | 9
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_disabled.yml | 9
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_unreachable.yml | 9
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml | 12
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml | 15
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/package_version_multiple.yml | 15
-rw-r--r--  test/integration/openshift_health_checker/preflight/playbooks/tasks/enable_repo.yml | 4
-rw-r--r--  test/integration/openshift_health_checker/preflight/preflight_test.go | 2
-rw-r--r--  test/integration/openshift_health_checker/teardown_container.yml | 1
308 files changed, 9739 insertions, 3281 deletions
diff --git a/.redhat-ci.inventory b/.redhat-ci.inventory
index 3c8296055..23bc9923c 100644
--- a/.redhat-ci.inventory
+++ b/.redhat-ci.inventory
@@ -9,6 +9,7 @@ ansible_python_interpreter=/usr/bin/python3
deployment_type=origin
openshift_image_tag="{{ lookup('env', 'OPENSHIFT_IMAGE_TAG') }}"
openshift_master_default_subdomain="{{ lookup('env', 'RHCI_ocp_node1_IP') }}.xip.io"
+openshift_check_min_host_memory_gb=1.9
[masters]
ocp-master
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 200f8d7f3..85a9820e8 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.6.68-1 ./
+3.6.98-1 ./
diff --git a/BUILD.md b/BUILD.md
index 7ac0187ee..d9de3f5aa 100644
--- a/BUILD.md
+++ b/BUILD.md
@@ -26,16 +26,18 @@ tito build --rpm
## Build an openshift-ansible container image
+**NOTE**: the examples below use "openshift-ansible" as the name of the image to build for simplicity and illustration purposes, and also to prevent potential confusion between custom-built images and official releases. See [README_CONTAINER_IMAGE.md](README_CONTAINER_IMAGE.md) for details about the released container images for openshift-ansible.
+
To build a container image of `openshift-ansible` using standalone **Docker**:
cd openshift-ansible
- docker build -f images/installer/Dockerfile -t openshift/openshift-ansible .
+ docker build -f images/installer/Dockerfile -t openshift-ansible .
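To confirm the build produced the expected local tag (a quick sanity check; the tag matches the build command above):

    docker images openshift-ansible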
### Building on OpenShift
To build an openshift-ansible image using an **OpenShift** [build and image stream](https://docs.openshift.org/latest/architecture/core_concepts/builds_and_image_streams.html) the straightforward command would be:
- oc new-build docker.io/aweiteka/playbook2image~https://github.com/openshift/openshift-ansible
+ oc new-build registry.centos.org/openshift/playbook2image~https://github.com/openshift/openshift-ansible
However, because the `Dockerfile` for this repository is not in the top level directory, and because we can't change the build context to the `images/installer` path (doing so would cause the build to fail), the `oc new-build` command above will create a build configuration using the *source-to-image* strategy, which is the default approach of the [playbook2image](https://github.com/openshift/playbook2image) base image. This does build an image successfully, but unfortunately the resulting image will be missing some customizations that are handled by the [Dockerfile](images/installer/Dockerfile) in this repo.
@@ -48,7 +50,7 @@ At the time of this writing there is no straightforward option to [set the docke
```
curl -s https://raw.githubusercontent.com/openshift/openshift-ansible/master/images/installer/Dockerfile |
oc new-build -D - \
- --docker-image=docker.io/aweiteka/playbook2image \
+ --docker-image=registry.centos.org/openshift/playbook2image \
https://github.com/openshift/openshift-ansible
```
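Once the build config exists, you can follow the build and locate the resulting image in its image stream. A sketch, assuming `oc new-build` derived the name `openshift-ansible` from the repository URL:

```
oc logs -f bc/openshift-ansible
oc get imagestream openshift-ansible
```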
@@ -76,5 +78,5 @@ Once the container image is built, we can import it into the OSTree
storage:
```
-atomic pull --storage ostree docker:openshift/openshift-ansible:latest
+atomic pull --storage ostree docker:openshift-ansible:latest
```
diff --git a/README_CONTAINER_IMAGE.md b/README_CONTAINER_IMAGE.md
index e8e6efb79..cf3b432df 100644
--- a/README_CONTAINER_IMAGE.md
+++ b/README_CONTAINER_IMAGE.md
@@ -6,6 +6,12 @@ The image is designed to **run as a non-root user**. The container's UID is mapp
**Note**: at this time there are known issues that prevent running this image for installation/upgrade purposes (i.e. running one of the config/upgrade playbooks) from within one of the hosts that is also an installation target: if the playbook you want to run attempts to manage the docker daemon and restart it (as install/upgrade playbooks do), it would kill the container itself during its operation.
+## A note about the name of the image
+
+The released container images for openshift-ansible follow the naming scheme determined by OpenShift's `imageConfig.format` configuration option. This means that the released image name is `openshift/origin-ansible` instead of `openshift/openshift-ansible`.
+
+This provides consistency with other images used by the platform and it's also a requirement for some use cases like using the image from [`oc cluster up`](https://github.com/openshift/origin/blob/master/docs/cluster_up_down.md).
+
## Usage
The `playbook2image` base image provides several options to control the behaviour of the containers. For more details on these options see the [playbook2image](https://github.com/openshift/playbook2image) documentation.
@@ -24,9 +30,9 @@ Here is an example of how to run a containerized `openshift-ansible` playbook th
-v $HOME/.ssh/id_rsa:/opt/app-root/src/.ssh/id_rsa:Z \
-v /etc/ansible/hosts:/tmp/inventory \
-e INVENTORY_FILE=/tmp/inventory \
- -e PLAYBOOK_FILE=playbooks/certificate_expiry/default.yaml \
+ -e PLAYBOOK_FILE=playbooks/byo/openshift-checks/certificate_expiry/default.yaml \
-e OPTS="-v" -t \
- openshift/openshift-ansible
+ openshift/origin-ansible
You might want to adjust some of the options in the example to match your environment and/or preferences. For example: you might want to create a separate directory on the host where you'll copy the ssh key and inventory files prior to invocation to avoid unwanted SELinux re-labeling of the original files or paths (see below).
@@ -40,13 +46,13 @@ Here is a detailed explanation of the options used in the command above:
* `-v /etc/ansible/hosts:/tmp/inventory` and `-e INVENTORY_FILE=/tmp/inventory` mount the Ansible inventory file into the container as `/tmp/inventory` and set the corresponding environment variable to point at it respectively. The example uses `/etc/ansible/hosts` as the inventory file as this is a default location, but your inventory is likely to be elsewhere so please adjust as needed. Note that depending on the file you point to you might have to handle SELinux labels in a similar way as with the ssh keys, e.g. by adding a `:z` flag to the volume mount, so again you might prefer to copy the inventory to a dedicated location first.
-* `-e PLAYBOOK_FILE=playbooks/certificate_expiry/default.yaml` specifies the playbook to run as a relative path from the top level directory of openshift-ansible.
+* `-e PLAYBOOK_FILE=playbooks/byo/openshift-checks/certificate_expiry/default.yaml` specifies the playbook to run as a relative path from the top level directory of openshift-ansible.
* `-e OPTS="-v"` and `-t` make the output look nicer: the `default.yaml` playbook does not generate results and runs quietly unless we add the `-v` option to the `ansible-playbook` invocation, and a TTY is allocated via `-t` so that Ansible adds color to the output.
Further usage examples are available in the [examples directory](examples/) with samples of how to use the image from within OpenShift.
-Additional usage information for images built from `playbook2image` like this one can be found in the [playbook2image examples](https://github.com/aweiteka/playbook2image/tree/master/examples).
+Additional usage information for images built from `playbook2image` like this one can be found in the [playbook2image examples](https://github.com/openshift/playbook2image/tree/master/examples).
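As noted above, copying the ssh key and inventory to a dedicated directory avoids SELinux relabeling of the original files. A minimal sketch, with illustrative paths and the same playbook as the example above:

```sh
mkdir -p ~/origin-ansible-run
cp ~/.ssh/id_rsa /etc/ansible/hosts ~/origin-ansible-run/
docker run \
    -v ~/origin-ansible-run/id_rsa:/opt/app-root/src/.ssh/id_rsa:Z \
    -v ~/origin-ansible-run/hosts:/tmp/inventory:z \
    -e INVENTORY_FILE=/tmp/inventory \
    -e PLAYBOOK_FILE=playbooks/byo/openshift-checks/certificate_expiry/default.yaml \
    -e OPTS="-v" -t \
    openshift/origin-ansible
```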
## Running openshift-ansible as a System Container
@@ -59,8 +65,8 @@ If the inventory file needs additional files then it can use the path `/var/lib/
Run the ansible system container:
```sh
-atomic install --system --set INVENTORY_FILE=$(pwd)/inventory.origin openshift/openshift-ansible
-systemctl start openshift-ansible
+atomic install --system --set INVENTORY_FILE=$(pwd)/inventory.origin openshift/origin-ansible
+systemctl start origin-ansible
```
The `INVENTORY_FILE` variable tells the installer which inventory file on the host will be bind-mounted inside the container. In the example above, a file called `inventory.origin` in the current directory is used as the inventory file for the installer.
@@ -68,5 +74,5 @@ The `INVENTORY_FILE` variable says to the installer what inventory file on the h
And to finally cleanup the container:
```
-atomic uninstall openshift-ansible
+atomic uninstall origin-ansible
```
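After installation the container is managed as a regular systemd service, so the usual tools apply (the unit name matches the `systemctl start` example above):

```sh
systemctl status origin-ansible
journalctl -u origin-ansible
```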
diff --git a/examples/certificate-check-upload.yaml b/examples/certificate-check-upload.yaml
index 8b560447f..1794cb096 100644
--- a/examples/certificate-check-upload.yaml
+++ b/examples/certificate-check-upload.yaml
@@ -4,10 +4,10 @@
# The generated reports are uploaded to a location on the master
# hosts, using the playbook 'easy-mode-upload.yaml'.
#
-# This example uses the openshift/openshift-ansible container image.
+# This example uses the openshift/origin-ansible container image.
# (see README_CONTAINER_IMAGE.md in the top level dir for more details).
#
-# The following objects are xpected to be configured before the creation
+# The following objects are expected to be configured before the creation
# of this Job:
# - A ConfigMap named 'inventory' with a key named 'hosts' that
# contains the Ansible inventory file
@@ -28,7 +28,7 @@ spec:
spec:
containers:
- name: openshift-ansible
- image: openshift/openshift-ansible
+ image: openshift/origin-ansible
env:
- name: PLAYBOOK_FILE
value: playbooks/certificate_expiry/easy-mode-upload.yaml
diff --git a/examples/certificate-check-volume.yaml b/examples/certificate-check-volume.yaml
index f6613bcd8..dd0a89c8e 100644
--- a/examples/certificate-check-volume.yaml
+++ b/examples/certificate-check-volume.yaml
@@ -4,10 +4,10 @@
# The generated reports are stored in a Persistent Volume using
# the playbook 'html_and_json_timestamp.yaml'.
#
-# This example uses the openshift/openshift-ansible container image.
+# This example uses the openshift/origin-ansible container image.
# (see README_CONTAINER_IMAGE.md in the top level dir for more details).
#
-# The following objects are xpected to be configured before the creation
+# The following objects are expected to be configured before the creation
# of this Job:
# - A ConfigMap named 'inventory' with a key named 'hosts' that
# contains the Ansible inventory file
@@ -30,7 +30,7 @@ spec:
spec:
containers:
- name: openshift-ansible
- image: openshift/openshift-ansible
+ image: openshift/origin-ansible
env:
- name: PLAYBOOK_FILE
value: playbooks/certificate_expiry/html_and_json_timestamp.yaml
diff --git a/examples/scheduled-certcheck-upload.yaml b/examples/scheduled-certcheck-upload.yaml
index b0a97361b..05890a357 100644
--- a/examples/scheduled-certcheck-upload.yaml
+++ b/examples/scheduled-certcheck-upload.yaml
@@ -28,7 +28,7 @@ spec:
spec:
containers:
- name: openshift-ansible
- image: openshift/openshift-ansible
+ image: openshift/origin-ansible
env:
- name: PLAYBOOK_FILE
value: playbooks/certificate_expiry/easy-mode-upload.yaml
diff --git a/examples/scheduled-certcheck-volume.yaml b/examples/scheduled-certcheck-volume.yaml
index 74cdc9e7f..2f26e8809 100644
--- a/examples/scheduled-certcheck-volume.yaml
+++ b/examples/scheduled-certcheck-volume.yaml
@@ -28,7 +28,7 @@ spec:
spec:
containers:
- name: openshift-ansible
- image: openshift/openshift-ansible
+ image: openshift/origin-ansible
env:
- name: PLAYBOOK_FILE
value: playbooks/certificate_expiry/html_and_json_timestamp.yaml
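Once the prerequisite objects described in each file's header comments exist (such as the `inventory` ConfigMap), any of these examples can be created as usual, e.g.:

```sh
oc create -f examples/scheduled-certcheck-volume.yaml
```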
diff --git a/hack/build-images.sh b/hack/build-images.sh
index 3e9896caa..ce421178f 100755
--- a/hack/build-images.sh
+++ b/hack/build-images.sh
@@ -7,7 +7,7 @@ set -o pipefail
STARTTIME=$(date +%s)
source_root=$(dirname "${0}")/..
-prefix="openshift/openshift-ansible"
+prefix="openshift/origin-ansible"
version="latest"
verbose=false
options="-f images/installer/Dockerfile"
@@ -44,7 +44,7 @@ if [ "$help" = true ]; then
echo "Options: "
echo " --prefix=PREFIX"
echo " The prefix to use for the image names."
- echo " default: openshift/openshift-ansible"
+ echo " default: openshift/origin-ansible"
echo
echo " --version=VERSION"
echo " The version used to tag the image"
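Combining the options from the help text above, a typical invocation looks like this (values are illustrative):

```sh
hack/build-images.sh --prefix=openshift/origin-ansible --version=latest
```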
diff --git a/hack/push-release.sh b/hack/push-release.sh
index 8639143af..131ed83ca 100755
--- a/hack/push-release.sh
+++ b/hack/push-release.sh
@@ -12,7 +12,7 @@ set -o pipefail
STARTTIME=$(date +%s)
OS_ROOT=$(dirname "${BASH_SOURCE}")/..
-PREFIX="${PREFIX:-openshift/openshift-ansible}"
+PREFIX="${PREFIX:-openshift/origin-ansible}"
# Go to the top of the tree.
cd "${OS_ROOT}"
diff --git a/images/installer/Dockerfile b/images/installer/Dockerfile
index f6af018ca..915dfe377 100644
--- a/images/installer/Dockerfile
+++ b/images/installer/Dockerfile
@@ -1,11 +1,11 @@
# Using playbook2image as a base
-# See https://github.com/aweiteka/playbook2image for details on the image
+# See https://github.com/openshift/playbook2image for details on the image
# including documentation for the settings/env vars referenced below
-FROM docker.io/aweiteka/playbook2image:latest
+FROM registry.centos.org/openshift/playbook2image:latest
MAINTAINER OpenShift Team <dev@lists.openshift.redhat.com>
-LABEL name="openshift-ansible" \
+LABEL name="openshift/origin-ansible" \
summary="OpenShift's installation and configuration tool" \
description="A containerized openshift-ansible image to let you run playbooks to install, upgrade, maintain and check an OpenShift cluster" \
url="https://github.com/openshift/openshift-ansible" \
@@ -22,7 +22,7 @@ USER root
# configurations for the two images.
RUN mkdir -p /usr/share/ansible/ && ln -s /opt/app-root/src /usr/share/ansible/openshift-ansible
-RUN INSTALL_PKGS="skopeo" && \
+RUN INSTALL_PKGS="skopeo openssl java-1.8.0-openjdk-headless httpd-tools" && \
yum install -y --setopt=tsflags=nodocs $INSTALL_PKGS && \
rpm -V $INSTALL_PKGS && \
yum clean all
diff --git a/images/installer/Dockerfile.rhel7 b/images/installer/Dockerfile.rhel7
index 00841e660..9d7eeec24 100644
--- a/images/installer/Dockerfile.rhel7
+++ b/images/installer/Dockerfile.rhel7
@@ -2,7 +2,7 @@ FROM openshift3/playbook2image
MAINTAINER OpenShift Team <dev@lists.openshift.redhat.com>
-LABEL name="openshift3/openshift-ansible" \
+LABEL name="openshift3/ose-ansible" \
summary="OpenShift's installation and configuration tool" \
description="A containerized openshift-ansible image to let you run playbooks to install, upgrade, maintain and check an OpenShift cluster" \
url="https://github.com/openshift/openshift-ansible" \
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
index 6641d6dc8..b2490638b 100644
--- a/inventory/byo/hosts.origin.example
+++ b/inventory/byo/hosts.origin.example
@@ -127,6 +127,10 @@ openshift_release=v3.6
# Alternate image format string, useful if you've got your own registry mirror
+# Configure this setting separately for nodes or masters
+#oreg_url_master=example.com/openshift3/ose-${component}:${version}
+#oreg_url_node=example.com/openshift3/ose-${component}:${version}
+# Or set the configuration globally
#oreg_url=example.com/openshift3/ose-${component}:${version}
# If oreg_url points to a registry other than registry.access.redhat.com we can
# modify image streams to point at that registry by setting the following to true
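# For illustration: with the format string above, OpenShift substitutes
# ${component} and ${version} for each image it pulls, so the global setting
# resolves to names such as example.com/openshift3/ose-pod:v3.6 and
# example.com/openshift3/ose-deployer:v3.6 (component names here are
# illustrative).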
@@ -325,8 +329,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# router's default certificate.
#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"}
#
-# Disable management of the OpenShift Router
-#openshift_hosted_manage_router=false
+# Manage the OpenShift Router
+#openshift_hosted_manage_router=true
#
# Router sharding support has been added and can be achieved by supplying the correct
# data to the inventory. The variable to house the data is openshift_hosted_routers
@@ -403,8 +407,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Validity of the auto-generated certificate in days (optional)
#openshift_hosted_registry_cert_expire_days=730
#
-# Disable management of the OpenShift Registry
-#openshift_hosted_manage_registry=false
+# Manage the OpenShift Registry
+#openshift_hosted_manage_registry=true
# Registry Storage Options
#
@@ -521,6 +525,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Currently, you may only alter the hostname portion of the url; altering the
# `/hawkular/metrics` path will break installation of metrics.
#openshift_hosted_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics
+# Configure the prefix and version for the component images
+#openshift_hosted_metrics_deployer_prefix=docker.io/openshift/origin-
+#openshift_hosted_metrics_deployer_version=3.6.0
# Logging deployment
#
@@ -751,6 +758,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Or you may optionally define your own build overrides configuration serialized as json
#openshift_buildoverrides_json='{"BuildOverrides":{"configuration":{"apiVersion":"v1","kind":"BuildDefaultsConfig","forcePull":"true"}}}'
+# Enable template service broker by specifying one or more namespaces whose
+# templates will be served by the broker
+#openshift_template_service_broker_namespaces=['openshift']
+
# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
#openshift_master_dynamic_provisioning_enabled=False
@@ -788,6 +799,35 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#
#etcd_ca_default_days=1825
+# Upgrade Control
+#
+# By default nodes are upgraded one at a time, serially, and all failures are
+# fatal. There is one set of variables for normal nodes and another for nodes
+# that are part of the control plane, as the number of hosts may differ
+# between those two groups.
+#openshift_upgrade_nodes_serial=1
+#openshift_upgrade_nodes_max_fail_percentage=0
+#openshift_upgrade_control_plane_nodes_serial=1
+#openshift_upgrade_control_plane_nodes_max_fail_percentage=0
+#
+# You can specify the number of nodes to upgrade at once. We do not currently
+# attempt to verify that you have capacity to drain this many nodes at once
+# so please be careful when specifying these values. You should also verify that
+# the expected number of nodes are all schedulable and ready before starting an
+# upgrade. If it's not possible to drain the requested nodes, the upgrade will
+# stall until the drain succeeds.
+#
+# If you're upgrading more than one node at a time you can specify the maximum
+# percentage of failures within the batch before the upgrade is aborted. Any
+# nodes that do fail are ignored for the rest of the playbook run, and you should
+# take care to investigate the failures and return the nodes to service so that
+# your cluster can run at full capacity.
+#
+# The actual failure percentage must exceed this value to abort the upgrade:
+# with a batch of 4 nodes, two failures amount to 50%, so this would abort on
+# two failures
+# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=49
+# whereas this would not
+# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=50
+
# host group for masters
[masters]
ose3-master[1:3]-ansible.test.example.com
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.ose.example
index e57b831f3..67d53b22d 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.ose.example
@@ -125,8 +125,11 @@ openshift_release=v3.6
# Tasks to run after each master is upgraded and system/services have been restarted.
# openshift_master_upgrade_post_hook=/usr/share/custom/post_master.yml
-
# Alternate image format string, useful if you've got your own registry mirror
+# Configure this setting separately for nodes or masters
+#oreg_url_master=example.com/openshift3/ose-${component}:${version}
+#oreg_url_node=example.com/openshift3/ose-${component}:${version}
+# Or set the configuration globally
#oreg_url=example.com/openshift3/ose-${component}:${version}
# If oreg_url points to a registry other than registry.access.redhat.com we can
# modify image streams to point at that registry by setting the following to true
@@ -325,8 +328,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# router's default certificate.
#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"}
#
-# Disable management of the OpenShift Router
-#openshift_hosted_manage_router=false
+# Manage the OpenShift Router (optional)
+#openshift_hosted_manage_router=true
#
# Router sharding support has been added and can be achieved by supplying the correct
# data to the inventory. The variable to house the data is openshift_hosted_routers
@@ -403,8 +406,8 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Validity of the auto-generated certificate in days (optional)
#openshift_hosted_registry_cert_expire_days=730
#
-# Disable management of the OpenShift Registry
-#openshift_hosted_manage_registry=false
+# Manage the OpenShift Registry (optional)
+#openshift_hosted_manage_registry=true
# Registry Storage Options
#
@@ -522,6 +525,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Currently, you may only alter the hostname portion of the url; altering the
# `/hawkular/metrics` path will break installation of metrics.
#openshift_hosted_metrics_public_url=https://hawkular-metrics.example.com/hawkular/metrics
+# Configure the prefix and version for the component images
+#openshift_hosted_metrics_deployer_prefix=registry.example.com:8888/openshift3/
+#openshift_hosted_metrics_deployer_version=3.6.0
# Logging deployment
#
@@ -752,6 +758,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Or you may optionally define your own build overrides configuration serialized as json
#openshift_buildoverrides_json='{"BuildOverrides":{"configuration":{"apiVersion":"v1","kind":"BuildDefaultsConfig","forcePull":"true"}}}'
+# Enable template service broker by specifying one or more namespaces whose
+# templates will be served by the broker
+#openshift_template_service_broker_namespaces=['openshift']
+
# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
#openshift_master_dynamic_provisioning_enabled=False
@@ -785,6 +795,35 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#
#etcd_ca_default_days=1825
+# Upgrade Control
+#
+# By default nodes are upgraded one at a time, serially, and all failures are
+# fatal. There is one set of variables for normal nodes and another for nodes
+# that are part of the control plane, as the number of hosts may differ
+# between those two groups.
+#openshift_upgrade_nodes_serial=1
+#openshift_upgrade_nodes_max_fail_percentage=0
+#openshift_upgrade_control_plane_nodes_serial=1
+#openshift_upgrade_control_plane_nodes_max_fail_percentage=0
+#
+# You can specify the number of nodes to upgrade at once. We do not currently
+# attempt to verify that you have capacity to drain this many nodes at once
+# so please be careful when specifying these values. You should also verify that
+# the expected number of nodes are all schedulable and ready before starting an
+# upgrade. If it's not possible to drain the requested nodes, the upgrade will
+# stall until the drain succeeds.
+#
+# If you're upgrading more than one node at a time you can specify the maximum
+# percentage of failures within the batch before the upgrade is aborted. Any
+# nodes that do fail are ignored for the rest of the playbook run, and you should
+# take care to investigate the failures and return the nodes to service so that
+# your cluster can run at full capacity.
+#
+# The actual failure percentage must exceed this value to abort the upgrade:
+# with a batch of 4 nodes, two failures amount to 50%, so this would abort on
+# two failures
+# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=49
+# whereas this would not
+# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=50
+
# host group for masters
[masters]
ose3-master[1:3]-ansible.test.example.com
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index 19e6356e7..5deb575b5 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -9,7 +9,7 @@
%global __requires_exclude ^/usr/bin/ansible-playbook$
Name: openshift-ansible
-Version: 3.6.68
+Version: 3.6.98
Release: 1%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
@@ -161,23 +161,29 @@ BuildArch: noarch
%files playbooks
%{_datadir}/ansible/%{name}/playbooks
-# We moved playbooks/common/openshift-master/library up to the top and replaced
-# it with a symlink. RPM doesn't handle this so we have to do some pre-transaction
-# magic. See https://fedoraproject.org/wiki/Packaging:Directory_Replacement
+# Over the history of openshift-ansible, some playbook directories had to be
+# moved and were replaced with symlinks for backwards compatibility.
+# RPM doesn't handle this so we have to do some pre-transaction magic.
+# See https://fedoraproject.org/wiki/Packaging:Directory_Replacement
%pretrans playbooks -p <lua>
--- Define the path to directory being replaced below.
+-- Define the paths to directories being replaced below.
-- DO NOT add a trailing slash at the end.
-path = "/usr/share/ansible/openshift-ansible/playbooks/common/openshift-master/library"
-st = posix.stat(path)
-if st and st.type == "directory" then
- status = os.rename(path, path .. ".rpmmoved")
- if not status then
- suffix = 0
- while not status do
- suffix = suffix + 1
- status = os.rename(path .. ".rpmmoved", path .. ".rpmmoved." .. suffix)
+dirs_to_sym = {
+ "/usr/share/ansible/openshift-ansible/playbooks/common/openshift-master/library",
+ "/usr/share/ansible/openshift-ansible/playbooks/certificate_expiry"
+}
+for i,path in ipairs(dirs_to_sym) do
+ st = posix.stat(path)
+ if st and st.type == "directory" then
+ status = os.rename(path, path .. ".rpmmoved")
+ if not status then
+ suffix = 0
+ while not status do
+ suffix = suffix + 1
+ status = os.rename(path .. ".rpmmoved", path .. ".rpmmoved." .. suffix)
+ end
+ os.rename(path, path .. ".rpmmoved")
end
- os.rename(path, path .. ".rpmmoved")
end
end
@@ -274,6 +280,225 @@ Atomic OpenShift Utilities includes
%changelog
+* Fri Jun 09 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.98-1
+-
+
+* Fri Jun 09 2017 Scott Dodson <sdodson@redhat.com> 3.6.97-1
+- Updated to using oo_random_word for secret gen (ewolinet@redhat.com)
+- Updating kibana to store session and oauth secrets for reuse, fix oauthclient
+ generation for ops (ewolinet@redhat.com)
+
+* Thu Jun 08 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.89.5-1
+- Rename container image to origin-ansible / ose-ansible (pep@redhat.com)
+
+* Thu Jun 08 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.89.4-1
+- Guard check for container install based on openshift dictionary key
+ (ayoung@redhat.com)
+- Separate client config removal in uninstall s.t. ansible_ssh_user is removed
+ from with_items. (abutcher@redhat.com)
+- Remove supported/implemented barrier for registry object storage providers.
+ (abutcher@redhat.com)
+- Add node unit file on upgrade (smilner@redhat.com)
+- fix up openshift-ansible for use with 'oc cluster up' (jcantril@redhat.com)
+- specify all logging index mappings for kibana (jcantril@redhat.com)
+- openshift-master: set r_etcd_common_etcd_runtime (gscrivan@redhat.com)
+- rename daemon.json to container-daemon.json (smilner@redhat.com)
+- Updating probe timeout and exposing variable to adjust timeout in image
+ (ewolinet@redhat.com)
+- Do not attempt to override openstack nodename (jdetiber@redhat.com)
+- Update image stream to openshift/origin:2c55ade (skuznets@redhat.com)
+
+* Wed Jun 07 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.89.3-1
+- Use local openshift.master.loopback_url when generating initial master
+ loopback kubeconfigs. (abutcher@redhat.com)
+
+* Tue Jun 06 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.89.2-1
+-
+
+* Tue Jun 06 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.89.1-1
+- Updating image for registry_console (ewolinet@redhat.com)
+- add elasticseatch, fluentd, kibana check (jvallejo@redhat.com)
+- show correct default value in inventory (mmckinst@redhat.com)
+- Skip service restarts within ca redeployment playbook when expired
+ certificates are detected. (abutcher@redhat.com)
+- Add mtu setting to /etc/sysconfig/docker-network (sdodson@redhat.com)
+- Add daemon_reload parameter to service tasks (tbielawa@redhat.com)
+- mux uses fluentd cert/key to talk to ES (rmeggins@redhat.com)
+- fix curator host, port params; remove curator es volumes
+ (rmeggins@redhat.com)
+- add mux docs; allow to specify mux namespaces (rmeggins@redhat.com)
+- oc_secret: allow for specifying secret type (jarrpa@redhat.com)
+- Revert "Merge pull request #4271 from DG-i/master" (skuznets@redhat.com)
+- verify upgrade targets separately for each group (masters, nodes, etcd)
+ (jchaloup@redhat.com)
+- Updating Kibana-proxy secret key name, fixing deleting secrets, fixed extra
+ ES dc creation (ewolinet@redhat.com)
+- upgrade: Reload systemd before restart (smilner@redhat.com)
+- Skip router/registry cert redeploy when
+ openshift_hosted_manage_{router,registry}=false (abutcher@redhat.com)
+- disable docker excluder before it is updated to remove older excluded
+ packages (jchaloup@redhat.com)
+- Support byo etcd for calico (djosborne10@gmail.com)
+- preflight int tests: fix for package_version changes (lmeyer@redhat.com)
+- Remove unnecessary comment. (rhcarvalho@gmail.com)
+- update aos_version module to support generic pkgs and versions
+ (jvallejo@redhat.com)
+- Add separate variables for control plane nodes (sdodson@redhat.com)
+- Copy Nuage VSD generated user certificates to Openshift master nodes
+ (sneha.deshpande@nokia.com)
+- add existing_ovs_version check (jvallejo@redhat.com)
+- Tolerate failures in the node upgrade playbook (sdodson@redhat.com)
+
+* Wed May 31 2017 Scott Dodson <sdodson@redhat.com> 3.6.89.0-1
+- AMP 2.0 (sdodson@redhat.com)
+- add support for oc_service for labels, externalIPs (rmeggins@redhat.com)
+- [JMAN4-161] Add templates and pv example for cloudforms jboss middleware
+ manager (pgier@redhat.com)
+
+* Wed May 31 2017 Scott Dodson <sdodson@redhat.com> 3.6.89-1
+- Adding default value for openshift_hosted_logging_storage_kind
+ (ewolinet@redhat.com)
+- memory check: use GiB/MiB and adjust memtotal (lmeyer@redhat.com)
+- bool (sdodson@redhat.com)
+- Metrics: update the imagePullPolicy to be always (mwringe@redhat.com)
+- Remove typos that got reintroduced (smilner@redhat.com)
+- oc_atomic_container: Workaround for invalid json from atomic command
+ (smilner@redhat.com)
+- Remove system-package=no from container-engine install (smilner@redhat.com)
+- oc_atomic_container: Hard code system-package=no (smilner@redhat.com)
+- Updating to generate PVC when storage type is passed in as nfs
+ (ewolinet@redhat.com)
+- disable become for local actions (Mathias.Merscher@dg-i.net)
+- check for rpm version and docker image version equality only if
+ openshift_pkg_version and openshift_image_tag are not defined
+ (jchaloup@redhat.com)
+
+* Tue May 30 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.86-1
+- Reduce memory requirement to 2gb for fedora ci jobs (sdodson@redhat.com)
+- openshift_logging: increasing *_elasticsearch_* default CPU and memory
+ (jwozniak@redhat.com)
+- Updating python-passlib assert (ewolinet@redhat.com)
+- allow to configure oreg_url specifically for node or master. refs #4233
+ (tobias@tobru.ch)
+- Updating registry-console version to be v3.6 instead of 3.6
+ (ewolinet@redhat.com)
+
+* Thu May 25 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.85-1
+- Prepending v to registry-console version (ewolinet@redhat.com)
+- memory health check: adjust threshold for etcd (lmeyer@redhat.com)
+- health checks: specify check skip reason (lmeyer@redhat.com)
+- health checks: configure failure output in playbooks (lmeyer@redhat.com)
+- disk/memory checks: make threshold configurable (lmeyer@redhat.com)
+- Show help on how to disable checks after failure (rhcarvalho@gmail.com)
+- Allow disabling checks via Ansible variable (rhcarvalho@gmail.com)
+- Verify memory and disk requirements before install (rhcarvalho@gmail.com)
+- filter_plugins: Allow for multiple pairs in map_from_pairs()
+ (jarrpa@redhat.com)
+
+* Wed May 24 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.84-1
+- oc_process: Better error output on failed template() call (jarrpa@redhat.com)
+
+* Wed May 24 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.83-1
+- Allow a hostname to resolve to 127.0.0.1 during validation (dms@redhat.com)
+
+* Wed May 24 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.82-1
+- Fixing tux warnings and some final clean up (ewolinet@redhat.com)
+- Appease travis (sdodson@redhat.com)
+- preflight int tests: fix test flake (lmeyer@redhat.com)
+- Add a readiness probe to the Kibana container (skuznets@redhat.com)
+- Create logging deployments with non-zero replica counts (skuznets@redhat.com)
+- Pulling changes from master branch (ewolinet@redhat.com)
+- Adding some missing changes (ewolinet@redhat.com)
+- fixing available variables for 2.3.0 (ewolinet@redhat.com)
+- Updating pvc generation names (ewolinet@redhat.com)
+- updating delete_logging to use modules (ewolinet@redhat.com)
+- Pulling in changes from master (ewolinet@redhat.com)
+- Decomposing openshift_logging role into subcomponent roles
+ (ewolinet@redhat.com)
+- Fix renaming error with calico template files (djosborne10@gmail.com)
+
+* Tue May 23 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.80-1
+- RPM workaround for the move of cert playbooks (pep@redhat.com)
+- health check playbooks: relocate and expand (lmeyer@redhat.com)
+
+* Tue May 23 2017 Scott Dodson <sdodson@redhat.com> 3.6.69-1
+- preflight int tests: fix for openshift_version dep (lmeyer@redhat.com)
+- Removing requirement to pass aws credentials (esauer@redhat.com)
+- Workaround sysctl module issue with py3 by converting task to lineinfile.
+ (abutcher@redhat.com)
+- inventory: rename certificates->certificate in router example
+ (smilner@redhat.com)
+- remove skopeo dependency on docker-py (jvallejo@redhat.com)
+- improve error handling for missing vars (jvallejo@redhat.com)
+- lib/base: Allow for more complex template params (jarrpa@redhat.com)
+- Fix yamllint problems (sdodson@redhat.com)
+- add ability to expose Elasticsearch as an external route
+ (rmeggins@redhat.com)
+- Parameterized Calico/Node Arguments (vincent.schwarzer@yahoo.de)
+- Fix auditConfig for non-HA environments (rteague@redhat.com)
+- Added Docker Registry Port 5000 to Firewalld (vincent.schwarzer@yahoo.de)
+- Added Calicoctl to deployment of Master Nodes (vincent.schwarzer@yahoo.de)
+- move etcd upgrade related code into etcd_upgrade role (jchaloup@redhat.com)
+- Localhost TMP Dir Fix (vincent.schwarzer@yahoo.de)
+- Adjusted Naming Schema of Calico Roles (vincent.schwarzer@yahoo.de)
+- Update hosts.*.example to include openshift_hosted_metrics_deployer_version
+ (pat2man@gmail.com)
+- Fix gpg key path in our repo (sdodson@redhat.com)
+- Uninstall: restart docker when container-engine restart hasn't changed.
+ (abutcher@redhat.com)
+- add etcd cluster size check (jvallejo@redhat.com)
+- fix etcd_container_version detection (jchaloup@redhat.com)
+- systemcontainercustom.conf.j2: use Environment instead of ENVIRONMENT
+ (gscrivan@redhat.com)
+- node, systemd: change Requires to Wants for openvswitch (gscrivan@redhat.com)
+- Add teams attribute to github identity provider (dms@redhat.com)
+- Don't escalate privileges in local tmpdir creation (skuznets@redhat.com)
+- Remove use of local_action with delegate_to and switch 'delegate_to:
+ localhost' temporary directory cleanup actions to local_actions.
+ (abutcher@redhat.com)
+- Rework openshift_excluders role (rteague@redhat.com)
+- Add regexp for container-engine lineinfile (smilner@redhat.com)
+- Default image policy on new clusters to on (ccoleman@redhat.com)
+- revert role-specific var name (jvallejo@redhat.com)
+- Filter non-strings from the oc_adm_ca_server_cert hostnames parameter.
+ (abutcher@redhat.com)
+- Don't set-up origin repositories if they've already been configured
+ (dms@redhat.com)
+- byo inventory versions 1.5 -> 3.6 (smilner@redhat.com)
+- byo inventory versions 3.5 -> 3.6 (smilner@redhat.com)
+- use dest instead of path for lineinfile (smilner@redhat.com)
+- openshift_version: skip rpm version==image version on Atomic
+ (gscrivan@redhat.com)
+- Add NO_PROXY workaround for container-engine atomic command
+ (smilner@redhat.com)
+- Add no_proxy to atomic.conf (smilner@redhat.com)
+- Include object validation in 3.6 upgrades (sdodson@redhat.com)
+- uninstall: handle container-engine (gscrivan@redhat.com)
+- Added Calico BGP Port 179 to Firewalld (vincent.schwarzer@yahoo.de)
+- Fixed for python3 with Fedora 25 Atomic (donny@fortnebula.com)
+- Add docker package for container-engine install (smilner@redhat.com)
+- Fix python3 error in repoquery (jpeeler@redhat.com)
+- check if hostname is in list of etcd hosts (jvallejo@redhat.com)
+- Fix templating of static service files (rteague@redhat.com)
+- Fix container image build references (pep@redhat.com)
+- Reset selinux context on /var/lib/origin/openshift.common.volumes
+ (sdodson@redhat.com)
+- Adding assert to check for python-passlib on control host
+ (ewolinet@redhat.com)
+- Update variable name to standard (rhcarvalho@gmail.com)
+- Make class attribute name shorter (rhcarvalho@gmail.com)
+- Add module docstring (rhcarvalho@gmail.com)
+- Update check (rhcarvalho@gmail.com)
+- Change based on feedback (vincent.schwarzer@yahoo.de)
+- Removed Hardcoded Calico URLs (vincent.schwarzer@yahoo.de)
+- int -> float (rhcarvalho@gmail.com)
+- Remove vim line (rhcarvalho@gmail.com)
+- add etcd volume check (jvallejo@redhat.com)
+- Added additional Calico Network Plugin Checks (vincent.schwarzer@yahoo.de)
+- Ensure good return code for specific until loops (smilner@redhat.com)
+- add template service broker configurable (jminter@redhat.com)
+- Prevent line wrap in yaml dump of IDP, fixes #3912 (rikkuness@gmail.com)
+
* Sat May 13 2017 Jenkins CD Merge Bot <tdawson@redhat.com> 3.6.68-1
- Updating registry-console image version during a post_control_plane upgrade
(ewolinet@redhat.com)
diff --git a/playbooks/adhoc/uninstall.yml b/playbooks/adhoc/uninstall.yml
index 1c8257162..97d835eae 100644
--- a/playbooks/adhoc/uninstall.yml
+++ b/playbooks/adhoc/uninstall.yml
@@ -393,10 +393,19 @@
- "{{ directories.results | default([]) }}"
- files
+ - set_fact:
+ client_users: "{{ [ansible_ssh_user, 'root'] | unique }}"
+
+ - name: Remove client kubeconfigs
+ file:
+ path: "~{{ item }}/.kube"
+ state: absent
+ with_items:
+ - "{{ client_users }}"
+
- name: Remove remaining files
file: path={{ item }} state=absent
with_items:
- - "~{{ ansible_ssh_user }}/.kube"
- /etc/ansible/facts.d/openshift.fact
- /etc/atomic-enterprise
- /etc/corosync
@@ -421,7 +430,6 @@
- /etc/sysconfig/origin-master
- /etc/sysconfig/origin-master-api
- /etc/sysconfig/origin-master-controllers
- - /root/.kube
- /usr/share/openshift/examples
- /var/lib/atomic-enterprise
- /var/lib/openshift
diff --git a/playbooks/byo/openshift-checks/README.md b/playbooks/byo/openshift-checks/README.md
new file mode 100644
index 000000000..4b2ff1f94
--- /dev/null
+++ b/playbooks/byo/openshift-checks/README.md
@@ -0,0 +1,65 @@
+# OpenShift health checks
+
+This directory contains Ansible playbooks for detecting potential problems prior
+to an install, as well as health checks to run on existing OpenShift clusters.
+
+Ansible's default operation mode is to fail fast, on the first error. However,
+when performing checks, it is useful to gather as much information about
+problems as possible in a single run.
+
+Thus, the playbooks run a battery of checks against the inventory hosts and have
+Ansible gather intermediate errors, giving a more complete diagnostic of the
+state of each host. If any check fails, the playbook run is marked as failed.
+
+To facilitate understanding the problems that were encountered, a custom
+callback plugin summarizes execution errors at the end of a playbook run.
+
+# Available playbooks
+
+1. Pre-install playbook ([pre-install.yml](pre-install.yml)) - verifies system
+   requirements and looks for common problems that can prevent a successful
+ installation of a production cluster.
+
+2. Diagnostic playbook ([health.yml](health.yml)) - checks an existing cluster
+ for known signs of problems.
+
+3. Certificate expiry playbooks ([certificate_expiry](certificate_expiry)) -
+ check that certificates in use are valid and not expiring soon.
+
+## Running
+
+With a [recent installation of Ansible](../../../README.md#setup), run the playbook
+against your inventory file. Here is the step-by-step:
+
+1. If you haven't done it yet, clone this repository:
+
+ ```console
+ $ git clone https://github.com/openshift/openshift-ansible
+ $ cd openshift-ansible
+ ```
+
+2. Run the appropriate playbook:
+
+ ```console
+ $ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/pre-install.yml
+ ```
+
+ or
+
+ ```console
+ $ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/health.yml
+ ```
+
+ or
+
+ ```console
+ $ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/certificate_expiry/default.yaml -v
+ ```
+
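+To skip specific checks, set the `openshift_disable_check` variable. This is a
+minimal sketch; the variable and the check names (`disk_availability`,
+`memory_availability`) follow the health checker in this repository, so verify
+them against your version:
+
+```console
+$ ansible-playbook -i <inventory file> playbooks/byo/openshift-checks/pre-install.yml \
+    -e openshift_disable_check=disk_availability,memory_availability
+```
+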
+## Running via Docker image
+
+This repository is built into a Docker image including Ansible so that it can
+be run anywhere Docker is available. Instructions for doing so may be found
+[in the README](../../README_CONTAINER_IMAGE.md).
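+
+For reference, a containerized run of the health playbook might look like the
+sketch below. The image name, mount paths, and environment variables follow
+README_CONTAINER_IMAGE.md; adjust them to your environment:
+
+```console
+$ docker run -t -u `id -u` \
+      -v $HOME/.ssh/id_rsa:/opt/app-root/src/.ssh/id_rsa:Z \
+      -v /etc/ansible/hosts:/tmp/inventory \
+      -e INVENTORY_FILE=/tmp/inventory \
+      -e PLAYBOOK_FILE=playbooks/byo/openshift-checks/health.yml \
+      -e OPTS="-v" \
+      openshift/origin-ansible
+```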
+
diff --git a/playbooks/certificate_expiry/default.yaml b/playbooks/byo/openshift-checks/certificate_expiry/default.yaml
index 630135cae..630135cae 100644
--- a/playbooks/certificate_expiry/default.yaml
+++ b/playbooks/byo/openshift-checks/certificate_expiry/default.yaml
diff --git a/playbooks/certificate_expiry/easy-mode-upload.yaml b/playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml
index 378d1f154..378d1f154 100644
--- a/playbooks/certificate_expiry/easy-mode-upload.yaml
+++ b/playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml
diff --git a/playbooks/certificate_expiry/easy-mode.yaml b/playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml
index ae41c7c14..ae41c7c14 100644
--- a/playbooks/certificate_expiry/easy-mode.yaml
+++ b/playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml
diff --git a/playbooks/certificate_expiry/html_and_json_default_paths.yaml b/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml
index d80cb6ff4..d80cb6ff4 100644
--- a/playbooks/certificate_expiry/html_and_json_default_paths.yaml
+++ b/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml
diff --git a/playbooks/certificate_expiry/html_and_json_timestamp.yaml b/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml
index 2189455b7..2189455b7 100644
--- a/playbooks/certificate_expiry/html_and_json_timestamp.yaml
+++ b/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml
diff --git a/playbooks/certificate_expiry/longer-warning-period-json-results.yaml b/playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml
index 87a0f3be4..87a0f3be4 100644
--- a/playbooks/certificate_expiry/longer-warning-period-json-results.yaml
+++ b/playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml
diff --git a/playbooks/certificate_expiry/longer_warning_period.yaml b/playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml
index 960457c4b..960457c4b 100644
--- a/playbooks/certificate_expiry/longer_warning_period.yaml
+++ b/playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml
diff --git a/playbooks/byo/openshift-checks/certificate_expiry/roles b/playbooks/byo/openshift-checks/certificate_expiry/roles
new file mode 120000
index 000000000..4bdbcbad3
--- /dev/null
+++ b/playbooks/byo/openshift-checks/certificate_expiry/roles
@@ -0,0 +1 @@
+../../../../roles \ No newline at end of file
diff --git a/playbooks/byo/openshift-checks/health.yml b/playbooks/byo/openshift-checks/health.yml
new file mode 100644
index 000000000..dfc1a7db0
--- /dev/null
+++ b/playbooks/byo/openshift-checks/health.yml
@@ -0,0 +1,3 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+- include: ../../common/openshift-checks/health.yml
diff --git a/playbooks/byo/openshift-checks/pre-install.yml b/playbooks/byo/openshift-checks/pre-install.yml
new file mode 100644
index 000000000..5e8c3ab9b
--- /dev/null
+++ b/playbooks/byo/openshift-checks/pre-install.yml
@@ -0,0 +1,3 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+- include: ../../common/openshift-checks/pre-install.yml
diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml
index acf5469bf..fd4a9eb26 100644
--- a/playbooks/byo/openshift-cluster/config.yml
+++ b/playbooks/byo/openshift-cluster/config.yml
@@ -3,6 +3,19 @@
tags:
- always
+- name: Verify Requirements
+ hosts: OSEv3
+ roles:
+ - openshift_health_checker
+ vars:
+ - r_openshift_health_checker_playbook_context: "install"
+ post_tasks:
+ - action: openshift_health_check
+ args:
+ checks:
+ - disk_availability
+ - memory_availability
+
- include: ../../common/openshift-cluster/std_include.yml
tags:
- always
diff --git a/playbooks/byo/openshift-cluster/redeploy-certificates.yml b/playbooks/byo/openshift-cluster/redeploy-certificates.yml
index 012ce69ec..a3894e243 100644
--- a/playbooks/byo/openshift-cluster/redeploy-certificates.yml
+++ b/playbooks/byo/openshift-cluster/redeploy-certificates.yml
@@ -20,5 +20,7 @@
- include: ../../common/openshift-node/restart.yml
- include: ../../common/openshift-cluster/redeploy-certificates/router.yml
+ when: openshift_hosted_manage_router | default(true) | bool
- include: ../../common/openshift-cluster/redeploy-certificates/registry.yml
+ when: openshift_hosted_manage_registry | default(true) | bool
diff --git a/playbooks/byo/openshift-preflight/README.md b/playbooks/byo/openshift-preflight/README.md
deleted file mode 100644
index b50292eac..000000000
--- a/playbooks/byo/openshift-preflight/README.md
+++ /dev/null
@@ -1,43 +0,0 @@
-# OpenShift preflight checks
-
-Here we provide an Ansible playbook for detecting potential roadblocks prior to
-an install or upgrade.
-
-Ansible's default operation mode is to fail fast, on the first error. However,
-when performing checks, it is useful to gather as much information about
-problems as possible in a single run.
-
-The `check.yml` playbook runs a battery of checks against the inventory hosts
-and tells Ansible to ignore intermediate errors, thus giving a more complete
-diagnostic of the state of each host. Still, if any check failed, the playbook
-run will be marked as having failed.
-
-To facilitate understanding the problems that were encountered, we provide a
-custom callback plugin to summarize execution errors at the end of a playbook
-run.
-
----
-
-*Note that currently the `check.yml` playbook is only useful for RPM-based
-installations. Containerized installs are excluded from checks for now, but
-might be included in the future if there is demand for that.*
-
----
-
-## Running
-
-With an installation of Ansible 2.2 or greater, run the playbook directly
-against your inventory file. Here is the step-by-step:
-
-1. If you haven't done it yet, clone this repository:
-
- ```console
- $ git clone https://github.com/openshift/openshift-ansible
- $ cd openshift-ansible
- ```
-
-2. Run the playbook:
-
- ```console
- $ ansible-playbook -i <inventory file> playbooks/byo/openshift-preflight/check.yml
- ```
diff --git a/playbooks/byo/openshift-preflight/check.yml b/playbooks/byo/openshift-preflight/check.yml
index eb763221f..2e53452a6 100644
--- a/playbooks/byo/openshift-preflight/check.yml
+++ b/playbooks/byo/openshift-preflight/check.yml
@@ -1,15 +1,3 @@
---
-- include: ../openshift-cluster/initialize_groups.yml
-
-- name: Run OpenShift health checks
- # Temporarily reverting to OSEv3 until group standardization is complete
- hosts: OSEv3
- roles:
- - openshift_health_checker
- post_tasks:
- # NOTE: we need to use the old "action: name" syntax until
- # https://github.com/ansible/ansible/issues/20513 is fixed.
- - action: openshift_health_check
- args:
- checks:
- - '@preflight'
+# The location has moved; this file remains so existing instructions keep working
+- include: ../openshift-checks/pre-install.yml
diff --git a/playbooks/certificate_expiry b/playbooks/certificate_expiry
new file mode 120000
index 000000000..9cf5334a1
--- /dev/null
+++ b/playbooks/certificate_expiry
@@ -0,0 +1 @@
+byo/openshift-checks/certificate_expiry/ \ No newline at end of file
diff --git a/playbooks/certificate_expiry/roles b/playbooks/certificate_expiry/roles
deleted file mode 120000
index b741aa3db..000000000
--- a/playbooks/certificate_expiry/roles
+++ /dev/null
@@ -1 +0,0 @@
-../../roles \ No newline at end of file
diff --git a/playbooks/common/openshift-checks/health.yml b/playbooks/common/openshift-checks/health.yml
new file mode 100644
index 000000000..1bee460e8
--- /dev/null
+++ b/playbooks/common/openshift-checks/health.yml
@@ -0,0 +1,11 @@
+---
+- name: Run OpenShift health checks
+ hosts: OSEv3
+ roles:
+ - openshift_health_checker
+ vars:
+ - r_openshift_health_checker_playbook_context: "health"
+ post_tasks:
+ - action: openshift_health_check # https://github.com/ansible/ansible/issues/20513
+ args:
+ checks: ['@health']
diff --git a/playbooks/common/openshift-checks/pre-install.yml b/playbooks/common/openshift-checks/pre-install.yml
new file mode 100644
index 000000000..e01c6f38d
--- /dev/null
+++ b/playbooks/common/openshift-checks/pre-install.yml
@@ -0,0 +1,11 @@
+---
+- hosts: OSEv3
+ name: run OpenShift pre-install checks
+ roles:
+ - openshift_health_checker
+ vars:
+ - r_openshift_health_checker_playbook_context: "pre-install"
+ post_tasks:
+ - action: openshift_health_check # https://github.com/ansible/ansible/issues/20513
+ args:
+ checks: ['@preflight']
diff --git a/playbooks/byo/openshift-preflight/roles b/playbooks/common/openshift-checks/roles
index 20c4c58cf..20c4c58cf 120000
--- a/playbooks/byo/openshift-preflight/roles
+++ b/playbooks/common/openshift-checks/roles
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml
index 0d0ff798c..0d94a011a 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/ca.yml
@@ -6,6 +6,18 @@
msg: "The current OpenShift version is less than 1.2/3.2 and does not support CA bundles."
when: not openshift.common.version_gte_3_2_or_1_2 | bool
+- name: Check cert expiries
+ hosts: oo_nodes_to_config:oo_etcd_to_config:oo_masters_to_config
+ vars:
+ openshift_certificate_expiry_show_all: yes
+ roles:
+ # Sets 'check_results' per host which contains health status for
+ # etcd, master and node certificates. We will use 'check_results'
+ # to determine if any certificates were expired prior to running
+ # this playbook. Service restarts will be skipped if any
+ # certificates were previously expired.
+ - role: openshift_certificate_expiry
+
- name: Backup existing etcd CA certificate directories
hosts: oo_etcd_to_config
roles:
@@ -134,6 +146,11 @@
changed_when: false
- include: ../../openshift-etcd/restart.yml
+ # Do not restart etcd when etcd certificates were previously expired.
+ when: ('expired' not in (hostvars
+ | oo_select_keys(groups['etcd'])
+ | oo_collect('check_results.check_results.etcd')
+ | oo_collect('health')))
# Update master config when ca-bundle not referenced. Services will be
# restarted below after new CA certificate has been distributed.
@@ -326,6 +343,16 @@
with_items: "{{ client_users }}"
- include: ../../openshift-master/restart.yml
+ # Do not restart masters when master certificates were previously expired.
+ when: ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/master.server.crt"}))
+ and
+ ('expired' not in hostvars
+ | oo_select_keys(groups['oo_masters_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_first_master.0].openshift.common.config_base ~ "/master/ca-bundle.crt"}))
- name: Distribute OpenShift CA certificate to nodes
hosts: oo_nodes_to_config
@@ -375,3 +402,13 @@
changed_when: false
- include: ../../openshift-node/restart.yml
+ # Do not restart nodes when node certificates were previously expired.
+ when: ('expired' not in hostvars
+ | oo_select_keys(groups['oo_nodes_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/server.crt"}))
+ and
+ ('expired' not in hostvars
+ | oo_select_keys(groups['oo_nodes_to_config'])
+ | oo_collect('check_results.check_results.ocp_certs')
+ | oo_collect('health', {'path':hostvars[groups.oo_nodes_to_config.0].openshift.common.config_base ~ "/node/ca.crt"}))
diff --git a/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
index 7988e97ab..a66301c0d 100644
--- a/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
+++ b/playbooks/common/openshift-cluster/upgrades/disable_node_excluders.yml
@@ -1,6 +1,6 @@
---
- name: Disable excluders
- hosts: oo_nodes_to_config
+ hosts: oo_nodes_to_upgrade:!oo_masters_to_config
gather_facts: no
roles:
- role: openshift_excluder
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index 9b76f1dd0..d9ddf3860 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -9,8 +9,8 @@
replace ( '${version}', openshift_image_tag ) }}"
router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) |
replace ( '${version}', openshift_image_tag ) }}"
- registry_console_image: "{{ openshift.master.registry_url | replace ( '${component}', 'registry-console') |
- replace ( '${version}', openshift.common.short_version ) }}"
+ registry_console_image: "{{ openshift.master.registry_url | regex_replace ( '(origin|ose)-\\${component}', 'registry-console') |
+ replace ( '${version}', 'v' ~ openshift.common.short_version ) }}"
pre_tasks:
- name: Load lib_openshift modules
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
index 7646e0fa6..9d8b73cff 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_docker_upgrade_targets.yml
@@ -1,23 +1,20 @@
---
-- name: Verify docker upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
- tasks:
- # Only check if docker upgrade is required if docker_upgrade is not
- # already set to False.
- - include: ../docker/upgrade_check.yml
- when: docker_upgrade is not defined or docker_upgrade | bool and not openshift.common.is_atomic | bool
+# Only check if docker upgrade is required if docker_upgrade is not
+# already set to False.
+- include: ../docker/upgrade_check.yml
+ when: docker_upgrade is not defined or docker_upgrade | bool and not openshift.common.is_atomic | bool
- # Additional checks for Atomic hosts:
+# Additional checks for Atomic hosts:
- - name: Determine available Docker
- shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
- register: g_atomic_docker_version_result
- when: openshift.common.is_atomic | bool
+- name: Determine available Docker
+ shell: "rpm -q --queryformat '---\ncurr_version: %{VERSION}\navail_version: \n' docker"
+ register: g_atomic_docker_version_result
+ when: openshift.common.is_atomic | bool
- - set_fact:
- l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
- when: openshift.common.is_atomic | bool
+- set_fact:
+ l_docker_version: "{{ g_atomic_docker_version_result.stdout | from_yaml }}"
+ when: openshift.common.is_atomic | bool
- - fail:
- msg: This playbook requires access to Docker 1.12 or later
- when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.12','<')
+- fail:
+ msg: This playbook requires access to Docker 1.12 or later
+ when: openshift.common.is_atomic | bool and l_docker_version.avail_version | default(l_docker_version.curr_version, true) | version_compare('1.12','<')
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
index 6a9f88707..1b437dce9 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -1,45 +1,41 @@
---
-- name: Verify upgrade targets
- hosts: oo_masters_to_config:oo_nodes_to_upgrade
+- name: Fail when OpenShift is not installed
+ fail:
+ msg: Verify OpenShift is already installed
+ when: openshift.common.version is not defined
- tasks:
- - name: Fail when OpenShift is not installed
- fail:
- msg: Verify OpenShift is already installed
- when: openshift.common.version is not defined
+- name: Verify containers are available for upgrade
+ command: >
+ docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }}
+ register: pull_result
+ changed_when: "'Downloaded newer image' in pull_result.stdout"
+ when: openshift.common.is_containerized | bool
- - name: Verify containers are available for upgrade
+- when: not openshift.common.is_containerized | bool
+ block:
+ - name: Check latest available OpenShift RPM version
command: >
- docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }}
- register: pull_result
- changed_when: "'Downloaded newer image' in pull_result.stdout"
- when: openshift.common.is_containerized | bool
-
- - when: not openshift.common.is_containerized | bool
- block:
- - name: Check latest available OpenShift RPM version
- command: >
- {{ repoquery_cmd }} --qf '%{version}' "{{ openshift.common.service_type }}"
- failed_when: false
- changed_when: false
- register: avail_openshift_version
-
- - name: Fail when unable to determine available OpenShift RPM version
- fail:
- msg: "Unable to determine available OpenShift RPM version"
- when:
- - avail_openshift_version.stdout == ''
+ {{ repoquery_cmd }} --qf '%{version}' "{{ openshift.common.service_type }}"
+ failed_when: false
+ changed_when: false
+ register: avail_openshift_version
- - name: Verify OpenShift RPMs are available for upgrade
- fail:
- msg: "OpenShift {{ avail_openshift_version.stdout }} is available, but {{ openshift_upgrade_target }} or greater is required"
- when:
- - not avail_openshift_version | skipped
- - avail_openshift_version.stdout | default('0.0', True) | version_compare(openshift_release, '<')
+ - name: Fail when unable to determine available OpenShift RPM version
+ fail:
+ msg: "Unable to determine available OpenShift RPM version"
+ when:
+ - avail_openshift_version.stdout == ''
- - name: Fail when openshift version does not meet minium requirement for Origin upgrade
+ - name: Verify OpenShift RPMs are available for upgrade
fail:
- msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later"
+ msg: "OpenShift {{ avail_openshift_version.stdout }} is available, but {{ openshift_upgrade_target }} or greater is required"
when:
- - deployment_type == 'origin'
- - openshift.common.version | version_compare(openshift_upgrade_min,'<')
+ - not avail_openshift_version | skipped
+ - avail_openshift_version.stdout | default('0.0', True) | version_compare(openshift_release, '<')
+
+- name: Fail when openshift version does not meet minimum requirement for Origin upgrade
+ fail:
+ msg: "This upgrade playbook must be run against OpenShift {{ openshift_upgrade_min }} or later"
+ when:
+ - deployment_type == 'origin'
+ - openshift.common.version | version_compare(openshift_upgrade_min,'<')
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index 0ad934d2d..b980909eb 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -107,8 +107,8 @@
yedit:
src: "{{ openshift.common.config_base }}/master/master-config.yaml"
key: 'imageConfig.format'
- value: "{{ oreg_url }}"
- when: oreg_url is defined
+ value: "{{ oreg_url | default(oreg_url_master) }}"
+ when: oreg_url is defined or oreg_url_master is defined
# Run the upgrade hook prior to restarting services/system if defined:
- debug: msg="Running master upgrade hook {{ openshift_master_upgrade_hook }}"
@@ -247,8 +247,8 @@
hosts: oo_masters_to_config:&oo_nodes_to_upgrade
# This var must be set with -e on invocation, as it is not a per-host inventory var
# and is evaluated early. Values such as "20%" can also be used.
- serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
- any_errors_fatal: true
+ serial: "{{ openshift_upgrade_control_plane_nodes_serial | default(1) }}"
+ max_fail_percentage: "{{ openshift_upgrade_control_plane_nodes_max_fail_percentage | default(0) }}"
pre_tasks:
- name: Load lib_openshift modules
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
index 4d455fe0a..91dbc2cd4 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_nodes.yml
@@ -4,7 +4,7 @@
# This var must be set with -e on invocation, as it is not a per-host inventory var
# and is evaluated early. Values such as "20%" can also be used.
serial: "{{ openshift_upgrade_nodes_serial | default(1) }}"
- any_errors_fatal: true
+ max_fail_percentage: "{{ openshift_upgrade_nodes_max_fail_percentage | default(0) }}"
pre_tasks:
- name: Load lib_openshift modules
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
index d81a13ef2..f1245aa2e 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade.yml
@@ -78,11 +78,17 @@
tags:
- pre_upgrade
-- include: ../pre/verify_upgrade_targets.yml
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
-- include: ../pre/verify_docker_upgrade_targets.yml
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../pre/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
index 8a692d02b..b693ab55c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
@@ -82,11 +82,17 @@
tags:
- pre_upgrade
-- include: ../pre/verify_upgrade_targets.yml
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
-- include: ../pre/verify_docker_upgrade_targets.yml
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../pre/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
index 2d30bba94..4fd029107 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_nodes.yml
@@ -79,11 +79,17 @@
tags:
- pre_upgrade
-- include: ../pre/verify_upgrade_targets.yml
+- name: Verify upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
-- include: ../pre/verify_docker_upgrade_targets.yml
+- name: Verify docker upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
index e9ff47f32..965e39482 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade.yml
@@ -78,11 +78,17 @@
tags:
- pre_upgrade
-- include: ../pre/verify_upgrade_targets.yml
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
-- include: ../pre/verify_docker_upgrade_targets.yml
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../pre/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
index d4ae8d8b4..7830f462c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
@@ -82,11 +82,17 @@
tags:
- pre_upgrade
-- include: ../pre/verify_upgrade_targets.yml
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
-- include: ../pre/verify_docker_upgrade_targets.yml
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../pre/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
index ae205b172..4364ff8e3 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_nodes.yml
@@ -79,11 +79,17 @@
tags:
- pre_upgrade
-- include: ../pre/verify_upgrade_targets.yml
+- name: Verify upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
-- include: ../pre/verify_docker_upgrade_targets.yml
+- name: Verify docker upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
index 1269634d1..e63b03e51 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
@@ -78,11 +78,17 @@
tags:
- pre_upgrade
-- include: ../pre/verify_upgrade_targets.yml
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
-- include: ../pre/verify_docker_upgrade_targets.yml
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../pre/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
index 21c075678..21e1d440d 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
@@ -82,11 +82,17 @@
tags:
- pre_upgrade
-- include: ../pre/verify_upgrade_targets.yml
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
-- include: ../pre/verify_docker_upgrade_targets.yml
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../pre/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
index e67e169fc..036d3fcf5 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_nodes.yml
@@ -79,11 +79,17 @@
tags:
- pre_upgrade
-- include: ../pre/verify_upgrade_targets.yml
+- name: Verify upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
-- include: ../pre/verify_docker_upgrade_targets.yml
+- name: Verify docker upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
index a1b1f3301..5d41b84d0 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -78,11 +78,17 @@
tags:
- pre_upgrade
-- include: ../pre/verify_upgrade_targets.yml
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
-- include: ../pre/verify_docker_upgrade_targets.yml
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_nodes_to_upgrade:oo_etcd_to_config
+ tasks:
+ - include: ../pre/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index af6e1f71b..e34259b00 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -82,11 +82,17 @@
tags:
- pre_upgrade
-- include: ../pre/verify_upgrade_targets.yml
+- name: Verify upgrade targets
+ hosts: oo_masters_to_config
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
-- include: ../pre/verify_docker_upgrade_targets.yml
+- name: Verify docker upgrade targets
+ hosts: oo_masters_to_config:oo_etcd_to_config
+ tasks:
+ - include: ../pre/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
index 285c18b7b..25eceaf90 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_nodes.yml
@@ -79,11 +79,17 @@
tags:
- pre_upgrade
-- include: ../pre/verify_upgrade_targets.yml
+- name: Verify upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_upgrade_targets.yml
tags:
- pre_upgrade
-- include: ../pre/verify_docker_upgrade_targets.yml
+- name: Verify docker upgrade targets
+ hosts: oo_nodes_to_upgrade
+ tasks:
+ - include: ../pre/verify_docker_upgrade_targets.yml
tags:
- pre_upgrade
diff --git a/playbooks/common/openshift-cluster/validate_hostnames.yml b/playbooks/common/openshift-cluster/validate_hostnames.yml
index 48cc03b19..33fc5630f 100644
--- a/playbooks/common/openshift-cluster/validate_hostnames.yml
+++ b/playbooks/common/openshift-cluster/validate_hostnames.yml
@@ -13,4 +13,6 @@
pause:
prompt: "The hostname \"{{ openshift.common.hostname }}\" for \"{{ ansible_nodename }}\" doesn't resolve to an ip address owned by this host. Please set openshift_hostname variable to a hostname that when resolved on the host in question resolves to an IP address matching an interface on this host. This host will fail liveness checks for pods utilizing hostPorts, press ENTER to continue or CTRL-C to abort."
seconds: "{{ 10 if openshift_override_hostname_check | default(false) | bool else omit }}"
- when: lookupip.stdout not in ansible_all_ipv4_addresses
+ when:
+ - lookupip.stdout != '127.0.0.1'
+ - lookupip.stdout not in ansible_all_ipv4_addresses
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 60cf56108..ddc4db8f8 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -117,6 +117,7 @@
| oo_collect('openshift.common.hostname')
| default(none, true) }}"
openshift_master_hosts: "{{ groups.oo_masters_to_config }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index 792ffb4e2..acebabc91 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -32,7 +32,7 @@
ansible_ssh_user: "{{ g_ssh_user | default(omit) }}"
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: "{{ groups.oo_nodes_to_config | default([]) }}"
- when: hostvars[item].openshift.common is defined and hostvars[item].openshift.common.is_containerized | bool and (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config)
+ when: hostvars[item].openshift is defined and hostvars[item].openshift.common is defined and hostvars[item].openshift.common.is_containerized | bool and (item in groups.oo_nodes_to_config and item in groups.oo_masters_to_config)
changed_when: False
- name: Configure containerized nodes
diff --git a/playbooks/libvirt/openshift-cluster/config.yml b/playbooks/libvirt/openshift-cluster/config.yml
index f782d6dab..477213f4e 100644
--- a/playbooks/libvirt/openshift-cluster/config.yml
+++ b/playbooks/libvirt/openshift-cluster/config.yml
@@ -3,6 +3,8 @@
# is localhost, so no hostname value (or public_hostname) value is getting
# assigned
+- include: ../../common/openshift-cluster/std_include.yml
+
- hosts: localhost
gather_facts: no
tasks:
diff --git a/requirements.txt b/requirements.txt
index 734ee6201..dae460713 100644
--- a/requirements.txt
+++ b/requirements.txt
@@ -7,3 +7,4 @@ pyOpenSSL==16.2.0
# We need to disable ruamel.yaml for now because of test failures
#ruamel.yaml
six==1.10.0
+passlib==1.6.5
diff --git a/roles/calico/defaults/main.yaml b/roles/calico/defaults/main.yaml
index 03c612982..c7eea46f2 100644
--- a/roles/calico/defaults/main.yaml
+++ b/roles/calico/defaults/main.yaml
@@ -1,15 +1,10 @@
---
kubeconfig: "{{openshift.common.config_base}}/node/{{ 'system:node:' + openshift.common.hostname }}.kubeconfig"
-etcd_endpoints: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls | join(',') }}"
cni_conf_dir: "/etc/cni/net.d/"
cni_bin_dir: "/opt/cni/bin/"
cni_url: "https://github.com/containernetworking/cni/releases/download/v0.4.0/cni-amd64-v0.4.0.tgz"
-calico_etcd_ca_cert_file: "/etc/origin/calico/calico.etcd-ca.crt"
-calico_etcd_cert_file: "/etc/origin/calico/calico.etcd-client.crt"
-calico_etcd_key_file: "/etc/origin/calico/calico.etcd-client.key"
-
calico_url_cni: "https://github.com/projectcalico/cni-plugin/releases/download/v1.5.5/calico"
calico_url_ipam: "https://github.com/projectcalico/cni-plugin/releases/download/v1.5.5/calico-ipam"
diff --git a/roles/calico/tasks/gen_certs.yml b/roles/calico/tasks/gen_certs.yml
new file mode 100644
index 000000000..2e6aa114e
--- /dev/null
+++ b/roles/calico/tasks/gen_certs.yml
@@ -0,0 +1,17 @@
+---
+- name: Calico Node | Generate OpenShift-etcd certs
+ include: ../../../roles/etcd_client_certificates/tasks/main.yml
+ vars:
+ etcd_cert_prefix: calico.etcd-
+ etcd_cert_config_dir: "{{ openshift.common.config_base }}/calico"
+ embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ etcd_cert_subdir: "openshift-calico-{{ openshift.common.hostname }}"
+
+- name: Calico Node | Set etcd cert location facts
+ set_fact:
+ calico_etcd_ca_cert_file: "/etc/origin/calico/calico.etcd-ca.crt"
+ calico_etcd_cert_file: "/etc/origin/calico/calico.etcd-client.crt"
+ calico_etcd_key_file: "/etc/origin/calico/calico.etcd-client.key"
+ calico_etcd_endpoints: "{{ hostvars[groups.oo_first_master.0].openshift.master.etcd_urls | join(',') }}"
+ calico_etcd_cert_dir: "/etc/origin/calico/"
diff --git a/roles/calico/tasks/main.yml b/roles/calico/tasks/main.yml
index fa5e338b3..8a7a61dc9 100644
--- a/roles/calico/tasks/main.yml
+++ b/roles/calico/tasks/main.yml
@@ -1,19 +1,36 @@
---
-- include: ../../../roles/etcd_client_certificates/tasks/main.yml
- vars:
- etcd_cert_prefix: calico.etcd-
- etcd_cert_config_dir: "{{ openshift.common.config_base }}/calico"
- embedded_etcd: "{{ hostvars[groups.oo_first_master.0].openshift.master.embedded_etcd }}"
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- etcd_cert_subdir: "openshift-calico-{{ openshift.common.hostname }}"
+- name: Calico Node | Error if invalid cert arguments
+ fail:
+ msg: "Must provide all or none for the following etcd params: calico_etcd_cert_dir, calico_etcd_ca_cert_file, calico_etcd_cert_file, calico_etcd_key_file, calico_etcd_endpoints"
+ when: (calico_etcd_cert_dir is defined or calico_etcd_ca_cert_file is defined or calico_etcd_cert_file is defined or calico_etcd_key_file is defined or calico_etcd_endpoints is defined) and not (calico_etcd_cert_dir is defined and calico_etcd_ca_cert_file is defined and calico_etcd_cert_file is defined and calico_etcd_key_file is defined and calico_etcd_endpoints is defined)
-- name: Calico Node | Assure the calico certs have been generated
+- name: Calico Node | Generate certs if not provided
+ include: gen_certs.yml
+ when: item is not defined
+ with_items:
+ - calico_etcd_ca_cert_file
+ - calico_etcd_cert_file
+ - calico_etcd_key_file
+ - calico_etcd_endpoints
+ - calico_etcd_cert_dir
+
+- name: Calico Node | Error if no certs set.
+ fail:
+ msg: "Invalid etcd configuration for calico."
+ when: item is not defined or item == ''
+ with_items:
+ - calico_etcd_ca_cert_file
+ - calico_etcd_cert_file
+ - calico_etcd_key_file
+ - calico_etcd_endpoints
+
+- name: Calico Node | Assure the calico certs are present
stat:
path: "{{ item }}"
with_items:
- - "{{ calico_etcd_ca_cert_file }}"
- - "{{ calico_etcd_cert_file}}"
- - "{{ calico_etcd_key_file }}"
+ - "{{ calico_etcd_ca_cert_file }}"
+ - "{{ calico_etcd_cert_file}}"
+ - "{{ calico_etcd_key_file }}"
- name: Calico Node | Configure Calico service unit file
template:
diff --git a/roles/calico/templates/calicoctl.conf.j2 b/roles/calico/templates/10-calico.conf.j2
index 3c8c6b046..1ec569cff 100644
--- a/roles/calico/templates/calicoctl.conf.j2
+++ b/roles/calico/templates/10-calico.conf.j2
@@ -4,7 +4,7 @@
"ipam": {
"type": "calico-ipam"
},
- "etcd_endpoints": "{{ etcd_endpoints }}",
+ "etcd_endpoints": "{{ calico_etcd_endpoints }}",
"etcd_key_file": "{{ calico_etcd_key_file }}",
"etcd_cert_file": "{{ calico_etcd_cert_file }}",
"etcd_ca_cert_file": "{{ calico_etcd_ca_cert_file }}",
diff --git a/roles/calico/templates/calico.service.j2 b/roles/calico/templates/calico.service.j2
index 719d7ba0d..302c5f34e 100644
--- a/roles/calico/templates/calico.service.j2
+++ b/roles/calico/templates/calico.service.j2
@@ -13,8 +13,8 @@ ExecStart=/usr/bin/docker run --net=host --privileged \
-e CALICO_IPV4POOL_IPIP={{ calico_ipv4pool_ipip }} \
-e CALICO_IPV4POOL_CIDR={{ calico_ipv4pool_cidr }} \
-e FELIX_IPV6SUPPORT=false \
- -e ETCD_ENDPOINTS={{ etcd_endpoints }} \
- -v /etc/origin/calico:/etc/origin/calico \
+ -e ETCD_ENDPOINTS={{ calico_etcd_endpoints }} \
+ -v {{ calico_etcd_cert_dir }}:{{ calico_etcd_cert_dir }} \
-e ETCD_CA_CERT_FILE={{ calico_etcd_ca_cert_file }} \
-e ETCD_CERT_FILE={{ calico_etcd_cert_file }} \
-e ETCD_KEY_FILE={{ calico_etcd_key_file }} \
diff --git a/roles/calico/templates/10-calico.cfg.j2 b/roles/calico/templates/calicoctl.cfg.j2
index 722385ed8..a00ea27dc 100644
--- a/roles/calico/templates/10-calico.cfg.j2
+++ b/roles/calico/templates/calicoctl.cfg.j2
@@ -3,7 +3,7 @@ kind: calicoApiConfig
metadata:
spec:
datastoreType: "etcdv2"
- etcdEndpoints: "{{ etcd_endpoints }}"
+ etcdEndpoints: "{{ calico_etcd_endpoints }}"
etcdKeyFile: "{{ calico_etcd_key_file }}"
etcdCertFile: "{{ calico_etcd_cert_file }}"
etcdCaCertFile: "{{ calico_etcd_ca_cert_file }}"
diff --git a/roles/calico_master/templates/calico-policy-controller.yml.j2 b/roles/calico_master/templates/calico-policy-controller.yml.j2
index 3fb1abf0d..1b87758ce 100644
--- a/roles/calico_master/templates/calico-policy-controller.yml.j2
+++ b/roles/calico_master/templates/calico-policy-controller.yml.j2
@@ -78,7 +78,7 @@ spec:
env:
# The location of the Calico etcd cluster.
- name: ETCD_ENDPOINTS
- value: {{ etcd_endpoints }}
+ value: {{ calico_etcd_endpoints }}
# Location of the CA certificate for etcd.
- name: ETCD_CA_CERT_FILE
value: {{ calico_etcd_ca_cert_file }}
@@ -96,10 +96,10 @@ spec:
volumeMounts:
# Mount in the etcd TLS secrets.
- name: certs
- mountPath: /etc/origin/calico
+ mountPath: {{ calico_etcd_cert_dir }}
volumes:
# Mount in the etcd TLS secrets.
- name: certs
hostPath:
- path: /etc/origin/calico
+ path: {{ calico_etcd_cert_dir }}
diff --git a/roles/docker/README.md b/roles/docker/README.md
index 4a9f21f22..19908c036 100644
--- a/roles/docker/README.md
+++ b/roles/docker/README.md
@@ -3,7 +3,7 @@ Docker
Ensures docker package or system container is installed, and optionally raises timeout for systemd-udevd.service to 5 minutes.
-daemon.json items may be found at https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file
+container-daemon.json items may be found at https://docs.docker.com/engine/reference/commandline/dockerd/#daemon-configuration-file
Requirements
------------
diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml
index e101730d2..c82d8659a 100644
--- a/roles/docker/tasks/package_docker.yml
+++ b/roles/docker/tasks/package_docker.yml
@@ -102,6 +102,21 @@
notify:
- restart docker
+- stat: path=/etc/sysconfig/docker-network
+ register: sysconfig_docker_network_check
+
+- name: Configure Docker Network OPTIONS
+ lineinfile:
+ dest: /etc/sysconfig/docker-network
+ regexp: '^DOCKER_NETWORK_OPTIONS=.*$'
+ line: "DOCKER_NETWORK_OPTIONS='\
+ {% if openshift.node is defined and openshift.node.sdn_mtu is defined %} --mtu={{ openshift.node.sdn_mtu }}{% endif %}'"
+ when:
+ - sysconfig_docker_network_check.stat.isreg is defined
+ - sysconfig_docker_network_check.stat.isreg
+ notify:
+ - restart docker
+
- name: Start the Docker service
systemd:
name: docker
diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml
index 3af3e00b2..650f06f86 100644
--- a/roles/docker/tasks/systemcontainer_docker.yml
+++ b/roles/docker/tasks/systemcontainer_docker.yml
@@ -102,7 +102,7 @@
l_docker_image: "{{ l_docker_image_prepend }}/{{ openshift.docker.service_name }}:latest"
# NOTE: no_proxy added as a workaround until https://github.com/projectatomic/atomic/pull/999 is released
-- name: Pre-pull Container Enginer System Container image
+- name: Pre-pull Container Engine System Container image
command: "atomic pull --storage ostree {{ l_docker_image }}"
changed_when: false
environment:
@@ -119,21 +119,19 @@
path: "{{ docker_conf_dir }}"
state: directory
-- name: Install Container Enginer System Container
+- name: Install Container Engine System Container
oc_atomic_container:
name: "{{ openshift.docker.service_name }}"
image: "{{ l_docker_image }}"
state: latest
- values:
- - "system-package=no"
- name: Configure Container Engine Service File
template:
dest: "{{ container_engine_systemd_dir }}/custom.conf"
src: systemcontainercustom.conf.j2
-# Set local versions of facts that must be in json format for daemon.json
-# NOTE: When jinja2.9+ is used the daemon.json file can move to using tojson
+# Set local versions of facts that must be in json format for container-daemon.json
+# NOTE: When jinja2.9+ is used the container-daemon.json file can move to using tojson
- set_fact:
l_docker_insecure_registries: "{{ docker_insecure_registries | default([]) | to_json }}"
l_docker_log_options: "{{ docker_log_options | default({}) | to_json }}"
@@ -141,10 +139,12 @@
l_docker_blocked_registries: "{{ docker_blocked_registries | default([]) | to_json }}"
l_docker_selinux_enabled: "{{ docker_selinux_enabled | default(true) | to_json }}"
-# Configure container-engine using the daemon.json file
+# Configure container-engine using the container-daemon.json file
+# NOTE: daemon.json and container-daemon.json have been separated to avoid
+# collision.
- name: Configure Container Engine
template:
- dest: "{{ docker_conf_dir }}/daemon.json"
+ dest: "{{ docker_conf_dir }}/container-daemon.json"
src: daemon.json
# Enable and start the container-engine service
diff --git a/roles/lib_openshift/library/oc_adm_ca_server_cert.py b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
index a6273cfe4..3974cc4dd 100644
--- a/roles/lib_openshift/library/oc_adm_ca_server_cert.py
+++ b/roles/lib_openshift/library/oc_adm_ca_server_cert.py
@@ -952,7 +952,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -1405,7 +1405,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1419,18 +1418,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_adm_manage_node.py b/roles/lib_openshift/library/oc_adm_manage_node.py
index 7493b5c3d..320eac17e 100644
--- a/roles/lib_openshift/library/oc_adm_manage_node.py
+++ b/roles/lib_openshift/library/oc_adm_manage_node.py
@@ -938,7 +938,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -1391,7 +1391,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1405,18 +1404,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_adm_policy_group.py b/roles/lib_openshift/library/oc_adm_policy_group.py
index 5e72f5954..f9658d6e1 100644
--- a/roles/lib_openshift/library/oc_adm_policy_group.py
+++ b/roles/lib_openshift/library/oc_adm_policy_group.py
@@ -924,7 +924,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -1377,7 +1377,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1391,18 +1390,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_adm_policy_user.py b/roles/lib_openshift/library/oc_adm_policy_user.py
index 371a3953b..0bdfd0bad 100644
--- a/roles/lib_openshift/library/oc_adm_policy_user.py
+++ b/roles/lib_openshift/library/oc_adm_policy_user.py
@@ -924,7 +924,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -1377,7 +1377,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1391,18 +1390,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_adm_registry.py b/roles/lib_openshift/library/oc_adm_registry.py
index 7240521c6..df0e40d20 100644
--- a/roles/lib_openshift/library/oc_adm_registry.py
+++ b/roles/lib_openshift/library/oc_adm_registry.py
@@ -1042,7 +1042,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -1495,7 +1495,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1509,18 +1508,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
@@ -1889,10 +1898,12 @@ class SecretConfig(object):
sname,
namespace,
kubeconfig,
- secrets=None):
+ secrets=None,
+ stype=None):
''' constructor for handling secret options '''
self.kubeconfig = kubeconfig
self.name = sname
+ self.type = stype
self.namespace = namespace
self.secrets = secrets
self.data = {}
@@ -1903,6 +1914,7 @@ class SecretConfig(object):
''' assign the correct properties for a secret dict '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'Secret'
+ self.data['type'] = self.type
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
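With the new stype argument, the secret dict gains a Kubernetes `type` field. A hypothetical illustration of what create_dict now assembles (the name and type values are assumptions, not taken from the commit):

    cfg = SecretConfig('registry-dockercfg', 'default',
                       '/etc/origin/master/admin.kubeconfig',
                       secrets=None, stype='kubernetes.io/dockercfg')
    # cfg.data -> {'apiVersion': 'v1', 'kind': 'Secret',
    #              'type': 'kubernetes.io/dockercfg',
    #              'metadata': {'name': 'registry-dockercfg',
    #                           'namespace': 'default'}, ...}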
@@ -1995,7 +2007,8 @@ class ServiceConfig(object):
cluster_ip=None,
portal_ip=None,
session_affinity=None,
- service_type=None):
+ service_type=None,
+ external_ips=None):
''' constructor for handling service options '''
self.name = sname
self.namespace = namespace
@@ -2006,6 +2019,7 @@ class ServiceConfig(object):
self.portal_ip = portal_ip
self.session_affinity = session_affinity
self.service_type = service_type
+ self.external_ips = external_ips
self.data = {}
self.create_dict()
@@ -2018,8 +2032,9 @@ class ServiceConfig(object):
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
if self.labels:
- for lab, lab_value in self.labels.items():
- self.data['metadata'][lab] = lab_value
+ self.data['metadata']['labels'] = {}
+ for lab, lab_value in self.labels.items():
+ self.data['metadata']['labels'][lab] = lab_value
self.data['spec'] = {}
if self.ports:
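The labels fix above is subtle: previously each label landed directly on metadata (e.g. {'metadata': {'name': 'docker-registry', 'region': 'infra'}}), which is not a valid Kubernetes object; labels now nest under metadata.labels as the API expects ({'metadata': {'name': 'docker-registry', 'labels': {'region': 'infra'}}}). The label key and value here are hypothetical.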
@@ -2041,6 +2056,10 @@ class ServiceConfig(object):
if self.service_type:
self.data['spec']['type'] = self.service_type
+ if self.external_ips:
+ self.data['spec']['externalIPs'] = self.external_ips
+
+
# pylint: disable=too-many-instance-attributes,too-many-public-methods
class Service(Yedit):
''' Class to model the oc service object '''
@@ -2049,6 +2068,7 @@ class Service(Yedit):
cluster_ip = "spec.clusterIP"
selector_path = 'spec.selector'
kind = 'Service'
+ external_ips = "spec.externalIPs"
def __init__(self, content):
'''Service constructor'''
@@ -2110,6 +2130,53 @@ class Service(Yedit):
'''add cluster ip'''
self.put(Service.portal_ip, pip)
+ def get_external_ips(self):
+ ''' get a list of external_ips '''
+ return self.get(Service.external_ips) or []
+
+ def add_external_ips(self, inc_external_ips):
+ ''' add an external_ip to the external_ips list '''
+ if not isinstance(inc_external_ips, list):
+ inc_external_ips = [inc_external_ips]
+
+ external_ips = self.get_external_ips()
+ if not external_ips:
+ self.put(Service.external_ips, inc_external_ips)
+ else:
+ external_ips.extend(inc_external_ips)
+
+ return True
+
+ def find_external_ips(self, inc_external_ip):
+ ''' find a specific external IP '''
+ val = None
+ try:
+ idx = self.get_external_ips().index(inc_external_ip)
+ val = self.get_external_ips()[idx]
+ except ValueError:
+ pass
+
+ return val
+
+ def delete_external_ips(self, inc_external_ips):
+ ''' remove an external IP from a service '''
+ if not isinstance(inc_external_ips, list):
+ inc_external_ips = [inc_external_ips]
+
+ external_ips = self.get(Service.external_ips) or []
+
+ if not external_ips:
+ return True
+
+ removed = False
+ for inc_external_ip in inc_external_ips:
+ external_ip = self.find_external_ips(inc_external_ip)
+ if external_ip:
+ external_ips.remove(external_ip)
+ removed = True
+
+ return removed
+
# -*- -*- -*- End included fragment: lib/service.py -*- -*- -*-
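A short usage sketch for the new external-IP helpers (addresses are hypothetical; Service wraps a parsed manifest and reads/writes it through the Yedit get/put helpers used elsewhere in this library):

    svc = Service(content={'kind': 'Service',
                           'metadata': {'name': 'router',
                                        'namespace': 'default'},
                           'spec': {}})
    svc.add_external_ips(['192.0.2.10', '192.0.2.11'])
    print(svc.get_external_ips())               # ['192.0.2.10', '192.0.2.11']
    print(svc.find_external_ips('192.0.2.10'))  # '192.0.2.10'
    svc.delete_external_ips('192.0.2.11')
    print(svc.get_external_ips())               # ['192.0.2.10']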
# -*- -*- -*- Begin included fragment: lib/volume.py -*- -*- -*-
@@ -2350,7 +2417,7 @@ class Registry(OpenShiftCLI):
def prepare_registry(self):
''' prepare a registry for instantiation '''
- options = self.config.to_option_list()
+ options = self.config.to_option_list(ascommalist='labels')
cmd = ['registry']
cmd.extend(options)
@@ -2656,7 +2723,7 @@ def main():
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
images=dict(default=None, type='str'),
latest_images=dict(default=False, type='bool'),
- labels=dict(default=None, type='list'),
+ labels=dict(default=None, type='dict'),
ports=dict(default=['5000'], type='list'),
replicas=dict(default=1, type='int'),
selector=dict(default=None, type='str'),
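Because labels is now a dict and prepare_registry passes ascommalist='labels', a value such as {'region': 'infra'} (hypothetical) renders as a single well-formed flag instead of a stringified Python dict:

    # old: "--labels={'region': 'infra'}"   (rejected by oc adm registry)
    # new: "--labels=region=infra"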
diff --git a/roles/lib_openshift/library/oc_adm_router.py b/roles/lib_openshift/library/oc_adm_router.py
index a54c62cd4..8af8cb196 100644
--- a/roles/lib_openshift/library/oc_adm_router.py
+++ b/roles/lib_openshift/library/oc_adm_router.py
@@ -1067,7 +1067,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -1520,7 +1520,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1534,18 +1533,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
@@ -1568,7 +1577,8 @@ class ServiceConfig(object):
cluster_ip=None,
portal_ip=None,
session_affinity=None,
- service_type=None):
+ service_type=None,
+ external_ips=None):
''' constructor for handling service options '''
self.name = sname
self.namespace = namespace
@@ -1579,6 +1589,7 @@ class ServiceConfig(object):
self.portal_ip = portal_ip
self.session_affinity = session_affinity
self.service_type = service_type
+ self.external_ips = external_ips
self.data = {}
self.create_dict()
@@ -1591,8 +1602,9 @@ class ServiceConfig(object):
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
if self.labels:
- for lab, lab_value in self.labels.items():
- self.data['metadata'][lab] = lab_value
+ self.data['metadata']['labels'] = {}
+ for lab, lab_value in self.labels.items():
+ self.data['metadata']['labels'][lab] = lab_value
self.data['spec'] = {}
if self.ports:
@@ -1614,6 +1626,10 @@ class ServiceConfig(object):
if self.service_type:
self.data['spec']['type'] = self.service_type
+ if self.external_ips:
+ self.data['spec']['externalIPs'] = self.external_ips
+
+
# pylint: disable=too-many-instance-attributes,too-many-public-methods
class Service(Yedit):
''' Class to model the oc service object '''
@@ -1622,6 +1638,7 @@ class Service(Yedit):
cluster_ip = "spec.clusterIP"
selector_path = 'spec.selector'
kind = 'Service'
+ external_ips = "spec.externalIPs"
def __init__(self, content):
'''Service constructor'''
@@ -1683,6 +1700,53 @@ class Service(Yedit):
'''add cluster ip'''
self.put(Service.portal_ip, pip)
+ def get_external_ips(self):
+ ''' get a list of external_ips '''
+ return self.get(Service.external_ips) or []
+
+ def add_external_ips(self, inc_external_ips):
+ ''' add an external_ip to the external_ips list '''
+ if not isinstance(inc_external_ips, list):
+ inc_external_ips = [inc_external_ips]
+
+ external_ips = self.get_external_ips()
+ if not external_ips:
+ self.put(Service.external_ips, inc_external_ips)
+ else:
+ external_ips.extend(inc_external_ips)
+
+ return True
+
+ def find_external_ips(self, inc_external_ip):
+ ''' find a specific external IP '''
+ val = None
+ try:
+ idx = self.get_external_ips().index(inc_external_ip)
+ val = self.get_external_ips()[idx]
+ except ValueError:
+ pass
+
+ return val
+
+ def delete_external_ips(self, inc_external_ips):
+ ''' remove an external IP from a service '''
+ if not isinstance(inc_external_ips, list):
+ inc_external_ips = [inc_external_ips]
+
+ external_ips = self.get(Service.external_ips) or []
+
+ if not external_ips:
+ return True
+
+ removed = False
+ for inc_external_ip in inc_external_ips:
+ external_ip = self.find_external_ips(inc_external_ip)
+ if external_ip:
+ external_ips.remove(external_ip)
+ removed = True
+
+ return removed
+
# -*- -*- -*- End included fragment: lib/service.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: lib/deploymentconfig.py -*- -*- -*-
@@ -2178,10 +2242,12 @@ class SecretConfig(object):
sname,
namespace,
kubeconfig,
- secrets=None):
+ secrets=None,
+ stype=None):
''' constructor for handling secret options '''
self.kubeconfig = kubeconfig
self.name = sname
+ self.type = stype
self.namespace = namespace
self.secrets = secrets
self.data = {}
@@ -2192,6 +2258,7 @@ class SecretConfig(object):
''' assign the correct properties for a secret dict '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'Secret'
+ self.data['type'] = self.type
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
@@ -2782,7 +2849,7 @@ class Router(OpenShiftCLI):
# No certificate was passed to us. do not pass one to oc adm router
self.config.config_options['default_cert']['include'] = False
- options = self.config.to_option_list()
+ options = self.config.to_option_list(ascommalist='labels')
cmd = ['router', self.config.name]
cmd.extend(options)
@@ -3083,7 +3150,7 @@ def main():
key_file=dict(default=None, type='str'),
images=dict(default=None, type='str'), #'openshift3/ose-${component}:${version}'
latest_images=dict(default=False, type='bool'),
- labels=dict(default=None, type='list'),
+ labels=dict(default=None, type='dict'),
ports=dict(default=['80:80', '443:443'], type='list'),
replicas=dict(default=1, type='int'),
selector=dict(default=None, type='str'),
diff --git a/roles/lib_openshift/library/oc_atomic_container.py b/roles/lib_openshift/library/oc_atomic_container.py
index d2620b4cc..1e017a576 100644
--- a/roles/lib_openshift/library/oc_atomic_container.py
+++ b/roles/lib_openshift/library/oc_atomic_container.py
@@ -73,7 +73,9 @@ from ansible.module_utils.basic import AnsibleModule
def _install(module, container, image, values_list):
''' install a container using atomic CLI. values_list is the list of --set arguments.
container is the name given to the container. image is the image to use for the installation. '''
- args = ['atomic', 'install', "--system", '--name=%s' % container] + values_list + [image]
+ # NOTE: system-package=no is hardcoded. This should be changed to an option in the future.
+ args = ['atomic', 'install', '--system', '--system-package=no',
+ '--name=%s' % container] + values_list + [image]
rc, out, err = module.run_command(args, check_rc=False)
if rc != 0:
return rc, out, err, False
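For reference, a hedged sketch of the argv this now produces (container name, image, and --set values are hypothetical):

    container = 'node'
    image = 'registry.example.com/ose-node:v3.6'
    values_list = ['--set', 'FOO=bar']
    args = ['atomic', 'install', '--system', '--system-package=no',
            '--name=%s' % container] + values_list + [image]
    # ['atomic', 'install', '--system', '--system-package=no', '--name=node',
    #  '--set', 'FOO=bar', 'registry.example.com/ose-node:v3.6']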
@@ -157,7 +159,9 @@ def core(module):
module.fail_json(rc=rc, msg=err)
return
- containers = json.loads(out)
+ # NOTE: "or '[]' is a workaround until atomic containers list --json
+ # provides an empty list when no containers are present.
+ containers = json.loads(out or '[]')
present = len(containers) > 0
old_image = containers[0]["image_name"] if present else None
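The `out or '[]'` guard matters because affected atomic builds print nothing at all, rather than an empty JSON list, when no containers are installed, and json.loads('') raises a ValueError. A quick standalone check:

    import json
    for out in ('', '[]', '[{"image_name": "img"}]'):
        containers = json.loads(out or '[]')
        print(len(containers) > 0)  # False, False, True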
diff --git a/roles/lib_openshift/library/oc_clusterrole.py b/roles/lib_openshift/library/oc_clusterrole.py
index 78c72ef26..3ed0d65dc 100644
--- a/roles/lib_openshift/library/oc_clusterrole.py
+++ b/roles/lib_openshift/library/oc_clusterrole.py
@@ -916,7 +916,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -1369,7 +1369,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1383,18 +1382,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_configmap.py b/roles/lib_openshift/library/oc_configmap.py
index c88f56fc6..5c8ed48d2 100644
--- a/roles/lib_openshift/library/oc_configmap.py
+++ b/roles/lib_openshift/library/oc_configmap.py
@@ -922,7 +922,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -1375,7 +1375,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1389,18 +1388,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_edit.py b/roles/lib_openshift/library/oc_edit.py
index 17e3f7dde..f3b6d552d 100644
--- a/roles/lib_openshift/library/oc_edit.py
+++ b/roles/lib_openshift/library/oc_edit.py
@@ -966,7 +966,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -1419,7 +1419,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1433,18 +1432,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_env.py b/roles/lib_openshift/library/oc_env.py
index 18ab97bc0..c6421128a 100644
--- a/roles/lib_openshift/library/oc_env.py
+++ b/roles/lib_openshift/library/oc_env.py
@@ -933,7 +933,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -1386,7 +1386,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1400,18 +1399,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_group.py b/roles/lib_openshift/library/oc_group.py
index 88c6ef209..a791c29af 100644
--- a/roles/lib_openshift/library/oc_group.py
+++ b/roles/lib_openshift/library/oc_group.py
@@ -906,7 +906,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -1359,7 +1359,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1373,18 +1372,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_image.py b/roles/lib_openshift/library/oc_image.py
index 45860cbe5..bbc123ce0 100644
--- a/roles/lib_openshift/library/oc_image.py
+++ b/roles/lib_openshift/library/oc_image.py
@@ -925,7 +925,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -1378,7 +1378,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1392,18 +1391,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_label.py b/roles/lib_openshift/library/oc_label.py
index 65923a698..cd1afd0d2 100644
--- a/roles/lib_openshift/library/oc_label.py
+++ b/roles/lib_openshift/library/oc_label.py
@@ -942,7 +942,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -1395,7 +1395,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1409,18 +1408,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_obj.py b/roles/lib_openshift/library/oc_obj.py
index 1d75a21b9..215723cc8 100644
--- a/roles/lib_openshift/library/oc_obj.py
+++ b/roles/lib_openshift/library/oc_obj.py
@@ -945,7 +945,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -1398,7 +1398,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1412,18 +1411,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_objectvalidator.py b/roles/lib_openshift/library/oc_objectvalidator.py
index 72add01f4..358ef5130 100644
--- a/roles/lib_openshift/library/oc_objectvalidator.py
+++ b/roles/lib_openshift/library/oc_objectvalidator.py
@@ -877,7 +877,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -1330,7 +1330,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1344,18 +1343,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_process.py b/roles/lib_openshift/library/oc_process.py
index 8e1ffe90f..025b846c6 100644
--- a/roles/lib_openshift/library/oc_process.py
+++ b/roles/lib_openshift/library/oc_process.py
@@ -934,7 +934,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -1387,7 +1387,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1401,18 +1400,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
@@ -1450,7 +1459,7 @@ class OCProcess(OpenShiftCLI):
if self._template is None:
results = self._process(self.name, False, self.params, self.data)
if results['returncode'] != 0:
- raise OpenShiftCLIError('Error processing template [%s].' % self.name)
+ raise OpenShiftCLIError('Error processing template [%s]: %s' % (self.name, results))
self._template = results['results']['items']
return self._template
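Including the full result dict in the exception makes failures diagnosable from the Ansible output alone, e.g. (template name and payload hypothetical):

    # OpenShiftCLIError: Error processing template [router-template]:
    #   {'returncode': 1, 'stderr': '...', 'results': ...}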
diff --git a/roles/lib_openshift/library/oc_project.py b/roles/lib_openshift/library/oc_project.py
index a06852fd8..05dfddab8 100644
--- a/roles/lib_openshift/library/oc_project.py
+++ b/roles/lib_openshift/library/oc_project.py
@@ -931,7 +931,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -1384,7 +1384,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1398,18 +1397,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_pvc.py b/roles/lib_openshift/library/oc_pvc.py
index 79673452d..d7de4964c 100644
--- a/roles/lib_openshift/library/oc_pvc.py
+++ b/roles/lib_openshift/library/oc_pvc.py
@@ -926,7 +926,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -1379,7 +1379,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1393,18 +1392,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_route.py b/roles/lib_openshift/library/oc_route.py
index ad705a6c5..3090b4cad 100644
--- a/roles/lib_openshift/library/oc_route.py
+++ b/roles/lib_openshift/library/oc_route.py
@@ -976,7 +976,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -1429,7 +1429,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1443,18 +1442,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_scale.py b/roles/lib_openshift/library/oc_scale.py
index 291ac8b19..6a505fb6b 100644
--- a/roles/lib_openshift/library/oc_scale.py
+++ b/roles/lib_openshift/library/oc_scale.py
@@ -920,7 +920,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -1373,7 +1373,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1387,18 +1386,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py
index df28df2bc..02257500f 100644
--- a/roles/lib_openshift/library/oc_secret.py
+++ b/roles/lib_openshift/library/oc_secret.py
@@ -108,6 +108,12 @@ options:
required: false
default: None
aliases: []
+ type:
+ description:
+ - The secret type.
+ required: false
+ default: None
+ aliases: []
force:
description:
- Whether or not to force the operation
@@ -966,7 +972,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -1419,7 +1425,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1433,18 +1438,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
@@ -1461,10 +1476,12 @@ class SecretConfig(object):
sname,
namespace,
kubeconfig,
- secrets=None):
+ secrets=None,
+ stype=None):
''' constructor for handling secret options '''
self.kubeconfig = kubeconfig
self.name = sname
+ self.type = stype
self.namespace = namespace
self.secrets = secrets
self.data = {}
@@ -1475,6 +1492,7 @@ class SecretConfig(object):
''' assign the correct properties for a secret dict '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'Secret'
+ self.data['type'] = self.type
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
@@ -1564,12 +1582,14 @@ class OCSecret(OpenShiftCLI):
def __init__(self,
namespace,
secret_name=None,
+ secret_type=None,
decode=False,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
''' Constructor for OpenshiftOC '''
super(OCSecret, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
self.name = secret_name
+ self.type = secret_type
self.decode = decode
def get(self):
@@ -1600,6 +1620,8 @@ class OCSecret(OpenShiftCLI):
secrets = ["%s=%s" % (sfile['name'], sfile['path']) for sfile in files]
cmd = ['secrets', 'new', self.name]
+ if self.type is not None:
+ cmd.append("--type=%s" % (self.type))
cmd.extend(secrets)
results = self.openshift_cmd(cmd)
@@ -1633,6 +1655,8 @@ class OCSecret(OpenShiftCLI):
secrets = ["%s=%s" % (sfile['name'], sfile['path']) for sfile in files]
cmd = ['-ojson', 'secrets', 'new', self.name]
+ if self.type is not None:
+ cmd.extend(["--type=%s" % (self.type)])
cmd.extend(secrets)
return self.openshift_cmd(cmd, output=True)
@@ -1645,6 +1669,7 @@ class OCSecret(OpenShiftCLI):
ocsecret = OCSecret(params['namespace'],
params['name'],
+ params['type'],
params['decode'],
kubeconfig=params['kubeconfig'],
verbose=params['debug'])
@@ -1767,6 +1792,7 @@ def main():
debug=dict(default=False, type='bool'),
namespace=dict(default='default', type='str'),
name=dict(default=None, type='str'),
+ type=dict(default=None, type='str'),
files=dict(default=None, type='list'),
delete_after=dict(default=False, type='bool'),
contents=dict(default=None, type='list'),
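A minimal sketch of how the new type parameter flows into the CLI call (secret name and type are assumptions):

    ocsecret = OCSecret('default', 'registry-dockercfg',
                        'kubernetes.io/dockercfg', decode=False)
    # ocsecret.new(files=...) now runs:
    #   oc secrets new registry-dockercfg --type=kubernetes.io/dockercfg <name=path ...>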
diff --git a/roles/lib_openshift/library/oc_service.py b/roles/lib_openshift/library/oc_service.py
index e98f83cc3..308f45488 100644
--- a/roles/lib_openshift/library/oc_service.py
+++ b/roles/lib_openshift/library/oc_service.py
@@ -140,6 +140,13 @@ options:
- LoadBalancer
- ExternalName
aliases: []
+ external_ips:
+ description:
+ - A list of the external IPs that are exposed for this service.
+ - https://kubernetes.io/docs/concepts/services-networking/service/#external-ips
+ required: false
+ default: None
+ aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
@@ -972,7 +979,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -1425,7 +1432,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1439,18 +1445,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
@@ -1473,7 +1489,8 @@ class ServiceConfig(object):
cluster_ip=None,
portal_ip=None,
session_affinity=None,
- service_type=None):
+ service_type=None,
+ external_ips=None):
''' constructor for handling service options '''
self.name = sname
self.namespace = namespace
@@ -1484,6 +1501,7 @@ class ServiceConfig(object):
self.portal_ip = portal_ip
self.session_affinity = session_affinity
self.service_type = service_type
+ self.external_ips = external_ips
self.data = {}
self.create_dict()
@@ -1496,8 +1514,9 @@ class ServiceConfig(object):
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
if self.labels:
- for lab, lab_value in self.labels.items():
- self.data['metadata'][lab] = lab_value
+ self.data['metadata']['labels'] = {}
+ for lab, lab_value in self.labels.items():
+ self.data['metadata']['labels'][lab] = lab_value
self.data['spec'] = {}
if self.ports:
@@ -1519,6 +1538,10 @@ class ServiceConfig(object):
if self.service_type:
self.data['spec']['type'] = self.service_type
+ if self.external_ips:
+ self.data['spec']['externalIPs'] = self.external_ips
+
+
# pylint: disable=too-many-instance-attributes,too-many-public-methods
class Service(Yedit):
''' Class to model the oc service object '''
@@ -1527,6 +1550,7 @@ class Service(Yedit):
cluster_ip = "spec.clusterIP"
selector_path = 'spec.selector'
kind = 'Service'
+ external_ips = "spec.externalIPs"
def __init__(self, content):
'''Service constructor'''
@@ -1588,6 +1612,53 @@ class Service(Yedit):
'''add cluster ip'''
self.put(Service.portal_ip, pip)
+ def get_external_ips(self):
+ ''' get a list of external_ips '''
+ return self.get(Service.external_ips) or []
+
+ def add_external_ips(self, inc_external_ips):
+ ''' add an external_ip to the external_ips list '''
+ if not isinstance(inc_external_ips, list):
+ inc_external_ips = [inc_external_ips]
+
+ external_ips = self.get_external_ips()
+ if not external_ips:
+ self.put(Service.external_ips, inc_external_ips)
+ else:
+ external_ips.extend(inc_external_ips)
+
+ return True
+
+ def find_external_ips(self, inc_external_ip):
+ ''' find a specific external IP '''
+ val = None
+ try:
+ idx = self.get_external_ips().index(inc_external_ip)
+ val = self.get_external_ips()[idx]
+ except ValueError:
+ pass
+
+ return val
+
+ def delete_external_ips(self, inc_external_ips):
+ ''' remove an external IP from a service '''
+ if not isinstance(inc_external_ips, list):
+ inc_external_ips = [inc_external_ips]
+
+ external_ips = self.get(Service.external_ips) or []
+
+ if not external_ips:
+ return True
+
+ removed = False
+ for inc_external_ip in inc_external_ips:
+ external_ip = self.find_external_ips(inc_external_ip)
+ if external_ip:
+ external_ips.remove(external_ip)
+ removed = True
+
+ return removed
+
# -*- -*- -*- End included fragment: lib/service.py -*- -*- -*-
# -*- -*- -*- Begin included fragment: class/oc_service.py -*- -*- -*-
@@ -1610,13 +1681,15 @@ class OCService(OpenShiftCLI):
ports,
session_affinity,
service_type,
+ external_ips,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
''' Constructor for OCVolume '''
super(OCService, self).__init__(namespace, kubeconfig, verbose)
self.namespace = namespace
self.config = ServiceConfig(sname, namespace, ports, selector, labels,
- cluster_ip, portal_ip, session_affinity, service_type)
+ cluster_ip, portal_ip, session_affinity, service_type,
+ external_ips)
self.user_svc = Service(content=self.config.data)
self.svc = None
@@ -1685,6 +1758,7 @@ class OCService(OpenShiftCLI):
params['ports'],
params['session_affinity'],
params['service_type'],
+ params['external_ips'],
params['kubeconfig'],
params['debug'])
@@ -1786,6 +1860,7 @@ def main():
ports=dict(default=None, type='list'),
session_affinity=dict(default='None', type='str'),
service_type=dict(default='ClusterIP', type='str'),
+ external_ips=dict(default=None, type='list'),
),
supports_check_mode=True,
)
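Putting the oc_service changes together, a hedged sketch of the manifest ServiceConfig now renders (all values hypothetical):

    cfg = ServiceConfig('router', 'default',
                        [{'port': 80, 'targetPort': 80}],
                        labels={'router': 'default'},
                        external_ips=['192.0.2.10'])
    print(cfg.data['metadata']['labels'])   # {'router': 'default'}
    print(cfg.data['spec']['externalIPs'])  # ['192.0.2.10']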
diff --git a/roles/lib_openshift/library/oc_serviceaccount.py b/roles/lib_openshift/library/oc_serviceaccount.py
index f00e9e4f6..68c1fc51c 100644
--- a/roles/lib_openshift/library/oc_serviceaccount.py
+++ b/roles/lib_openshift/library/oc_serviceaccount.py
@@ -918,7 +918,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -1371,7 +1371,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1385,18 +1384,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
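
The `str(value).replace("'", r'"')` change repeated through these generated modules targets non-string template parameters: `str()` renders a Python dict or list with single quotes, which is not valid JSON for `oc process`, so single quotes are swapped for double quotes before the `-v key=value` pairs are built. A minimal standalone sketch of the effect (not the module itself):

```
params = {'DATA': {'one': 1, 'two': 2}, 'NAME': 'demo'}
# str() on a dict uses single quotes; swap them so the value is JSON-parseable
param_str = ["{}={}".format(key, str(value).replace("'", r'"'))
             for key, value in sorted(params.items())]
print(param_str)
# ['DATA={"one": 1, "two": 2}', 'NAME=demo']
```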
diff --git a/roles/lib_openshift/library/oc_serviceaccount_secret.py b/roles/lib_openshift/library/oc_serviceaccount_secret.py
index 6691495a6..ebc5bf8a2 100644
--- a/roles/lib_openshift/library/oc_serviceaccount_secret.py
+++ b/roles/lib_openshift/library/oc_serviceaccount_secret.py
@@ -918,7 +918,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -1371,7 +1371,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1385,18 +1384,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_user.py b/roles/lib_openshift/library/oc_user.py
index 72f2fbf03..d1a20fddc 100644
--- a/roles/lib_openshift/library/oc_user.py
+++ b/roles/lib_openshift/library/oc_user.py
@@ -978,7 +978,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -1431,7 +1431,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1445,18 +1444,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_version.py b/roles/lib_openshift/library/oc_version.py
index bc3340a94..548c9d8e0 100644
--- a/roles/lib_openshift/library/oc_version.py
+++ b/roles/lib_openshift/library/oc_version.py
@@ -890,7 +890,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -1343,7 +1343,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1357,18 +1356,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/library/oc_volume.py b/roles/lib_openshift/library/oc_volume.py
index 9dec0a6d4..3826cd8e5 100644
--- a/roles/lib_openshift/library/oc_volume.py
+++ b/roles/lib_openshift/library/oc_volume.py
@@ -967,7 +967,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -1420,7 +1420,6 @@ class Utils(object): # pragma: no cover
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -1434,18 +1433,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
diff --git a/roles/lib_openshift/src/ansible/oc_adm_registry.py b/roles/lib_openshift/src/ansible/oc_adm_registry.py
index c85973c7d..d669a3488 100644
--- a/roles/lib_openshift/src/ansible/oc_adm_registry.py
+++ b/roles/lib_openshift/src/ansible/oc_adm_registry.py
@@ -17,7 +17,7 @@ def main():
kubeconfig=dict(default='/etc/origin/master/admin.kubeconfig', type='str'),
images=dict(default=None, type='str'),
latest_images=dict(default=False, type='bool'),
- labels=dict(default=None, type='list'),
+ labels=dict(default=None, type='dict'),
ports=dict(default=['5000'], type='list'),
replicas=dict(default=1, type='int'),
selector=dict(default=None, type='str'),
diff --git a/roles/lib_openshift/src/ansible/oc_adm_router.py b/roles/lib_openshift/src/ansible/oc_adm_router.py
index b6f8e90d0..c6563cc2f 100644
--- a/roles/lib_openshift/src/ansible/oc_adm_router.py
+++ b/roles/lib_openshift/src/ansible/oc_adm_router.py
@@ -21,7 +21,7 @@ def main():
key_file=dict(default=None, type='str'),
images=dict(default=None, type='str'), #'openshift3/ose-${component}:${version}'
latest_images=dict(default=False, type='bool'),
- labels=dict(default=None, type='list'),
+ labels=dict(default=None, type='dict'),
ports=dict(default=['80:80', '443:443'], type='list'),
replicas=dict(default=1, type='int'),
selector=dict(default=None, type='str'),
diff --git a/roles/lib_openshift/src/ansible/oc_atomic_container.py b/roles/lib_openshift/src/ansible/oc_atomic_container.py
index 20d75cb63..1a5ab6869 100644
--- a/roles/lib_openshift/src/ansible/oc_atomic_container.py
+++ b/roles/lib_openshift/src/ansible/oc_atomic_container.py
@@ -9,7 +9,9 @@ from ansible.module_utils.basic import AnsibleModule
def _install(module, container, image, values_list):
''' install a container using atomic CLI. values_list is the list of --set arguments.
container is the name given to the container. image is the image to use for the installation. '''
- args = ['atomic', 'install', "--system", '--name=%s' % container] + values_list + [image]
+ # NOTE: system-package=no is hardcoded. This should be changed to an option in the future.
+ args = ['atomic', 'install', '--system', '--system-package=no',
+ '--name=%s' % container] + values_list + [image]
rc, out, err = module.run_command(args, check_rc=False)
if rc != 0:
return rc, out, err, False
@@ -93,7 +95,9 @@ def core(module):
module.fail_json(rc=rc, msg=err)
return
- containers = json.loads(out)
+ # NOTE: "or '[]' is a workaround until atomic containers list --json
+ # provides an empty list when no containers are present.
+ containers = json.loads(out or '[]')
present = len(containers) > 0
old_image = containers[0]["image_name"] if present else None
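
The `out or '[]'` guard exists because, as the new comment notes, `atomic containers list --json` can print an empty string instead of `[]` when nothing is installed, and a bare `json.loads('')` raises. A small sketch of the pattern:

```
import json

def parse_containers(out):
    # Treat empty output as an empty JSON list so json.loads never fails
    containers = json.loads(out or '[]')
    present = len(containers) > 0
    old_image = containers[0]["image_name"] if present else None
    return present, old_image

assert parse_containers('') == (False, None)
assert parse_containers('[{"image_name": "img:latest"}]') == (True, "img:latest")
```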
diff --git a/roles/lib_openshift/src/ansible/oc_secret.py b/roles/lib_openshift/src/ansible/oc_secret.py
index 1337cbbe5..faa7c1772 100644
--- a/roles/lib_openshift/src/ansible/oc_secret.py
+++ b/roles/lib_openshift/src/ansible/oc_secret.py
@@ -15,6 +15,7 @@ def main():
debug=dict(default=False, type='bool'),
namespace=dict(default='default', type='str'),
name=dict(default=None, type='str'),
+ type=dict(default=None, type='str'),
files=dict(default=None, type='list'),
delete_after=dict(default=False, type='bool'),
contents=dict(default=None, type='list'),
diff --git a/roles/lib_openshift/src/ansible/oc_service.py b/roles/lib_openshift/src/ansible/oc_service.py
index 9eb144e9c..b90c08255 100644
--- a/roles/lib_openshift/src/ansible/oc_service.py
+++ b/roles/lib_openshift/src/ansible/oc_service.py
@@ -21,6 +21,7 @@ def main():
ports=dict(default=None, type='list'),
session_affinity=dict(default='None', type='str'),
service_type=dict(default='ClusterIP', type='str'),
+ external_ips=dict(default=None, type='list'),
),
supports_check_mode=True,
)
diff --git a/roles/lib_openshift/src/class/oc_adm_registry.py b/roles/lib_openshift/src/class/oc_adm_registry.py
index 3c130fe28..ad6869bb6 100644
--- a/roles/lib_openshift/src/class/oc_adm_registry.py
+++ b/roles/lib_openshift/src/class/oc_adm_registry.py
@@ -143,7 +143,7 @@ class Registry(OpenShiftCLI):
def prepare_registry(self):
''' prepare a registry for instantiation '''
- options = self.config.to_option_list()
+ options = self.config.to_option_list(ascommalist='labels')
cmd = ['registry']
cmd.extend(options)
diff --git a/roles/lib_openshift/src/class/oc_adm_router.py b/roles/lib_openshift/src/class/oc_adm_router.py
index 1a0b94b80..0d50116d1 100644
--- a/roles/lib_openshift/src/class/oc_adm_router.py
+++ b/roles/lib_openshift/src/class/oc_adm_router.py
@@ -222,7 +222,7 @@ class Router(OpenShiftCLI):
# No certificate was passed to us. do not pass one to oc adm router
self.config.config_options['default_cert']['include'] = False
- options = self.config.to_option_list()
+ options = self.config.to_option_list(ascommalist='labels')
cmd = ['router', self.config.name]
cmd.extend(options)
diff --git a/roles/lib_openshift/src/class/oc_process.py b/roles/lib_openshift/src/class/oc_process.py
index eba9a43cd..62a6bd571 100644
--- a/roles/lib_openshift/src/class/oc_process.py
+++ b/roles/lib_openshift/src/class/oc_process.py
@@ -30,7 +30,7 @@ class OCProcess(OpenShiftCLI):
if self._template is None:
results = self._process(self.name, False, self.params, self.data)
if results['returncode'] != 0:
- raise OpenShiftCLIError('Error processing template [%s].' % self.name)
+ raise OpenShiftCLIError('Error processing template [%s]: %s' % (self.name, results))
self._template = results['results']['items']
return self._template
diff --git a/roles/lib_openshift/src/class/oc_secret.py b/roles/lib_openshift/src/class/oc_secret.py
index deb36a9fa..ee83580df 100644
--- a/roles/lib_openshift/src/class/oc_secret.py
+++ b/roles/lib_openshift/src/class/oc_secret.py
@@ -13,12 +13,14 @@ class OCSecret(OpenShiftCLI):
def __init__(self,
namespace,
secret_name=None,
+ secret_type=None,
decode=False,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
''' Constructor for OpenshiftOC '''
super(OCSecret, self).__init__(namespace, kubeconfig=kubeconfig, verbose=verbose)
self.name = secret_name
+ self.type = secret_type
self.decode = decode
def get(self):
@@ -49,6 +51,8 @@ class OCSecret(OpenShiftCLI):
secrets = ["%s=%s" % (sfile['name'], sfile['path']) for sfile in files]
cmd = ['secrets', 'new', self.name]
+ if self.type is not None:
+ cmd.append("--type=%s" % (self.type))
cmd.extend(secrets)
results = self.openshift_cmd(cmd)
@@ -82,6 +86,8 @@ class OCSecret(OpenShiftCLI):
secrets = ["%s=%s" % (sfile['name'], sfile['path']) for sfile in files]
cmd = ['-ojson', 'secrets', 'new', self.name]
+ if self.type is not None:
+ cmd.extend(["--type=%s" % (self.type)])
cmd.extend(secrets)
return self.openshift_cmd(cmd, output=True)
@@ -94,6 +100,7 @@ class OCSecret(OpenShiftCLI):
ocsecret = OCSecret(params['namespace'],
params['name'],
+ params['type'],
params['decode'],
kubeconfig=params['kubeconfig'],
verbose=params['debug'])
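
With `secret_type` threaded through the constructor, both `create` and the dry-run path append a `--type` flag when a type is given. A hedged sketch of the command assembly (the helper name is illustrative, not part of the module):

```
def build_secret_cmd(name, files, secret_type=None):
    # Mirror OCSecret.create: name=path pairs, plus an optional --type flag
    secrets = ["%s=%s" % (sfile['name'], sfile['path']) for sfile in files]
    cmd = ['secrets', 'new', name]
    if secret_type is not None:
        cmd.append("--type=%s" % secret_type)
    cmd.extend(secrets)
    return cmd

print(build_secret_cmd('testsecretname',
                       [{'name': 'data', 'path': '/tmp/somesecret.json'}],
                       secret_type='Opaque'))
# ['secrets', 'new', 'testsecretname', '--type=Opaque', 'data=/tmp/somesecret.json']
```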
diff --git a/roles/lib_openshift/src/class/oc_service.py b/roles/lib_openshift/src/class/oc_service.py
index 20cf23df5..7268a0c88 100644
--- a/roles/lib_openshift/src/class/oc_service.py
+++ b/roles/lib_openshift/src/class/oc_service.py
@@ -19,13 +19,15 @@ class OCService(OpenShiftCLI):
ports,
session_affinity,
service_type,
+ external_ips,
kubeconfig='/etc/origin/master/admin.kubeconfig',
verbose=False):
''' Constructor for OCService '''
super(OCService, self).__init__(namespace, kubeconfig, verbose)
self.namespace = namespace
self.config = ServiceConfig(sname, namespace, ports, selector, labels,
- cluster_ip, portal_ip, session_affinity, service_type)
+ cluster_ip, portal_ip, session_affinity, service_type,
+ external_ips)
self.user_svc = Service(content=self.config.data)
self.svc = None
@@ -94,6 +96,7 @@ class OCService(OpenShiftCLI):
params['ports'],
params['session_affinity'],
params['service_type'],
+ params['external_ips'],
params['kubeconfig'],
params['debug'])
diff --git a/roles/lib_openshift/src/doc/secret b/roles/lib_openshift/src/doc/secret
index 5c2bd9bc0..76b147f6f 100644
--- a/roles/lib_openshift/src/doc/secret
+++ b/roles/lib_openshift/src/doc/secret
@@ -57,6 +57,12 @@ options:
required: false
default: None
aliases: []
+ type:
+ description:
+ - The secret type.
+ required: false
+ default: None
+ aliases: []
force:
description:
- Whether or not to force the operation
diff --git a/roles/lib_openshift/src/doc/service b/roles/lib_openshift/src/doc/service
index 418f91dc5..ba9aa0b38 100644
--- a/roles/lib_openshift/src/doc/service
+++ b/roles/lib_openshift/src/doc/service
@@ -89,6 +89,13 @@ options:
- LoadBalancer
- ExternalName
aliases: []
+ external_ips:
+ description:
+ - A list of the external IPs that are exposed for this service.
+ - https://kubernetes.io/docs/concepts/services-networking/service/#external-ips
+ required: false
+ default: None
+ aliases: []
author:
- "Kenny Woodson <kwoodson@redhat.com>"
extends_documentation_fragment: []
diff --git a/roles/lib_openshift/src/lib/base.py b/roles/lib_openshift/src/lib/base.py
index 2bf795e25..b3f01008b 100644
--- a/roles/lib_openshift/src/lib/base.py
+++ b/roles/lib_openshift/src/lib/base.py
@@ -128,7 +128,7 @@ class OpenShiftCLI(object):
else:
cmd.append(template_name)
if params:
- param_str = ["{}={}".format(key, value) for key, value in params.items()]
+ param_str = ["{}={}".format(key, str(value).replace("'", r'"')) for key, value in params.items()]
cmd.append('-v')
cmd.extend(param_str)
@@ -581,7 +581,6 @@ class Utils(object):
print('returning true')
return True
-
class OpenShiftCLIConfig(object):
'''Generic Config'''
def __init__(self, rname, namespace, kubeconfig, options):
@@ -595,18 +594,28 @@ class OpenShiftCLIConfig(object):
''' return config options '''
return self._options
- def to_option_list(self):
- '''return all options as a string'''
- return self.stringify()
-
- def stringify(self):
- ''' return the options hash as cli params in a string '''
+ def to_option_list(self, ascommalist=''):
+ '''return all options as a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs'''
+ return self.stringify(ascommalist)
+
+ def stringify(self, ascommalist=''):
+ ''' return the options hash as cli params in a string
+ if ascommalist is set to the name of a key, and
+ the value of that key is a dict, format the dict
+ as a list of comma delimited key=value pairs '''
rval = []
for key in sorted(self.config_options.keys()):
data = self.config_options[key]
if data['include'] \
and (data['value'] or isinstance(data['value'], int)):
- rval.append('--{}={}'.format(key.replace('_', '-'), data['value']))
+ if key == ascommalist:
+ val = ','.join(['{}={}'.format(kk, vv) for kk, vv in sorted(data['value'].items())])
+ else:
+ val = data['value']
+ rval.append('--{}={}'.format(key.replace('_', '-'), val))
return rval
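
The `ascommalist` parameter lets one dict-valued option be flattened into a single comma-delimited flag, which is why the registry and router classes now call `to_option_list(ascommalist='labels')` and the unit tests expect `--labels=another-label=val,docker-registry=default`. A trimmed, standalone sketch of the loop:

```
def stringify(config_options, ascommalist=''):
    # Render options as CLI flags, flattening one dict-valued key if asked
    rval = []
    for key in sorted(config_options.keys()):
        data = config_options[key]
        if data['include'] and (data['value'] or isinstance(data['value'], int)):
            if key == ascommalist:
                val = ','.join('{}={}'.format(kk, vv)
                               for kk, vv in sorted(data['value'].items()))
            else:
                val = data['value']
            rval.append('--{}={}'.format(key.replace('_', '-'), val))
    return rval

opts = {'labels': {'include': True,
                   'value': {'docker-registry': 'default', 'another-label': 'val'}},
        'replicas': {'include': True, 'value': 1}}
print(stringify(opts, ascommalist='labels'))
# ['--labels=another-label=val,docker-registry=default', '--replicas=1']
```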
diff --git a/roles/lib_openshift/src/lib/secret.py b/roles/lib_openshift/src/lib/secret.py
index 75c32e8b1..a1c202442 100644
--- a/roles/lib_openshift/src/lib/secret.py
+++ b/roles/lib_openshift/src/lib/secret.py
@@ -9,10 +9,12 @@ class SecretConfig(object):
sname,
namespace,
kubeconfig,
- secrets=None):
+ secrets=None,
+ stype=None):
''' constructor for handling secret options '''
self.kubeconfig = kubeconfig
self.name = sname
+ self.type = stype
self.namespace = namespace
self.secrets = secrets
self.data = {}
@@ -23,6 +25,7 @@ class SecretConfig(object):
''' assign the correct properties for a secret dict '''
self.data['apiVersion'] = 'v1'
self.data['kind'] = 'Secret'
+ self.data['type'] = self.type
self.data['metadata'] = {}
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
diff --git a/roles/lib_openshift/src/lib/service.py b/roles/lib_openshift/src/lib/service.py
index eef568779..0e8cc3aa5 100644
--- a/roles/lib_openshift/src/lib/service.py
+++ b/roles/lib_openshift/src/lib/service.py
@@ -15,7 +15,8 @@ class ServiceConfig(object):
cluster_ip=None,
portal_ip=None,
session_affinity=None,
- service_type=None):
+ service_type=None,
+ external_ips=None):
''' constructor for handling service options '''
self.name = sname
self.namespace = namespace
@@ -26,6 +27,7 @@ class ServiceConfig(object):
self.portal_ip = portal_ip
self.session_affinity = session_affinity
self.service_type = service_type
+ self.external_ips = external_ips
self.data = {}
self.create_dict()
@@ -38,8 +40,9 @@ class ServiceConfig(object):
self.data['metadata']['name'] = self.name
self.data['metadata']['namespace'] = self.namespace
if self.labels:
- for lab, lab_value in self.labels.items():
- self.data['metadata'][lab] = lab_value
+ self.data['metadata']['labels'] = {}
+ for lab, lab_value in self.labels.items():
+ self.data['metadata']['labels'][lab] = lab_value
self.data['spec'] = {}
if self.ports:
@@ -61,6 +64,10 @@ class ServiceConfig(object):
if self.service_type:
self.data['spec']['type'] = self.service_type
+ if self.external_ips:
+ self.data['spec']['externalIPs'] = self.external_ips
+
+
# pylint: disable=too-many-instance-attributes,too-many-public-methods
class Service(Yedit):
''' Class to model the oc service object '''
@@ -69,6 +76,7 @@ class Service(Yedit):
cluster_ip = "spec.clusterIP"
selector_path = 'spec.selector'
kind = 'Service'
+ external_ips = "spec.externalIPs"
def __init__(self, content):
'''Service constructor'''
@@ -129,3 +137,50 @@ class Service(Yedit):
def add_portal_ip(self, pip):
'''add cluster ip'''
self.put(Service.portal_ip, pip)
+
+ def get_external_ips(self):
+ ''' get a list of external_ips '''
+ return self.get(Service.external_ips) or []
+
+ def add_external_ips(self, inc_external_ips):
+ ''' add an external_ip to the external_ips list '''
+ if not isinstance(inc_external_ips, list):
+ inc_external_ips = [inc_external_ips]
+
+ external_ips = self.get_external_ips()
+ if not external_ips:
+ self.put(Service.external_ips, inc_external_ips)
+ else:
+ external_ips.extend(inc_external_ips)
+
+ return True
+
+ def find_external_ips(self, inc_external_ip):
+ ''' find a specific external IP '''
+ val = None
+ try:
+ idx = self.get_external_ips().index(inc_external_ip)
+ val = self.get_external_ips()[idx]
+ except ValueError:
+ pass
+
+ return val
+
+ def delete_external_ips(self, inc_external_ips):
+ ''' remove an external IP from a service '''
+ if not isinstance(inc_external_ips, list):
+ inc_external_ips = [inc_external_ips]
+
+ external_ips = self.get(Service.external_ips) or []
+
+ if not external_ips:
+ return True
+
+ removed = False
+ for inc_external_ip in inc_external_ips:
+ external_ip = self.find_external_ips(inc_external_ip)
+ if external_ip:
+ external_ips.remove(external_ip)
+ removed = True
+
+ return removed
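
Two behavioural points in this file are worth calling out: labels are now nested under `metadata.labels` instead of being written directly onto `metadata`, and `spec.externalIPs` gains small get/add/find/delete helpers. A stripped-down stand-in for those helpers, operating on a plain dict where the real class goes through Yedit:

```
class MiniService(object):
    # Illustrative stand-in for Service's externalIPs helpers
    def __init__(self, spec=None):
        self.spec = spec or {}

    def get_external_ips(self):
        return self.spec.get('externalIPs', [])

    def add_external_ips(self, ips):
        if not isinstance(ips, list):
            ips = [ips]
        self.spec.setdefault('externalIPs', []).extend(ips)

    def delete_external_ips(self, ips):
        if not isinstance(ips, list):
            ips = [ips]
        current = self.spec.get('externalIPs', [])
        removed = False
        for ip in ips:
            if ip in current:
                current.remove(ip)
                removed = True
        return removed

svc = MiniService()
svc.add_external_ips(['1.2.3.4', '5.6.7.8'])
svc.delete_external_ips('1.2.3.4')
print(svc.get_external_ips())  # ['5.6.7.8']
```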
diff --git a/roles/lib_openshift/src/test/integration/oc_service.yml b/roles/lib_openshift/src/test/integration/oc_service.yml
index 3eb6facef..29535f24a 100755
--- a/roles/lib_openshift/src/test/integration/oc_service.yml
+++ b/roles/lib_openshift/src/test/integration/oc_service.yml
@@ -18,6 +18,9 @@
test-registry: default
session_affinity: ClientIP
service_type: ClusterIP
+ labels:
+ component: test-registry
+ infra: registry
register: svc_out
- debug: var=svc_out
@@ -25,6 +28,8 @@
that:
- "svc_out.results.results[0]['metadata']['name'] == 'test-registry'"
- svc_out.changed
+ - "svc_out.results.results[0]['metadata']['labels']['component'] == 'test-registry'"
+ - "svc_out.results.results[0]['metadata']['labels']['infra'] == 'registry'"
msg: service create failed.
# Test idempotent create
diff --git a/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py b/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py
index 97cf86170..77787fe87 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_adm_registry.py
@@ -218,7 +218,7 @@ class RegistryTest(unittest.TestCase):
'kubeconfig': '/etc/origin/master/admin.kubeconfig',
'images': None,
'latest_images': None,
- 'labels': None,
+ 'labels': {"docker-registry": "default", "another-label": "val"},
'ports': ['5000'],
'replicas': 1,
'selector': 'type=infra',
@@ -255,6 +255,7 @@ class RegistryTest(unittest.TestCase):
mock.call(['oc', 'get', 'dc', 'docker-registry', '-o', 'json', '-n', 'default'], None),
mock.call(['oc', 'get', 'svc', 'docker-registry', '-o', 'json', '-n', 'default'], None),
mock.call(['oc', 'adm', 'registry',
+ "--labels=another-label=val,docker-registry=default",
'--ports=5000', '--replicas=1', '--selector=type=infra',
'--service-account=registry', '--dry-run=True', '-o', 'json', '-n', 'default'], None),
mock.call(['oc', 'create', '-f', mock.ANY, '-n', 'default'], None),
diff --git a/roles/lib_openshift/src/test/unit/test_oc_adm_router.py b/roles/lib_openshift/src/test/unit/test_oc_adm_router.py
index 5481ac623..dcf768e08 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_adm_router.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_adm_router.py
@@ -300,7 +300,7 @@ class RouterTest(unittest.TestCase):
'cert_file': None,
'key_file': None,
'cacert_file': None,
- 'labels': None,
+ 'labels': {"router": "router", "another-label": "val"},
'ports': ['80:80', '443:443'],
'images': None,
'latest_images': None,
@@ -363,6 +363,7 @@ class RouterTest(unittest.TestCase):
mock.call(['oc', 'get', 'secret', 'router-certs', '-o', 'json', '-n', 'default'], None),
mock.call(['oc', 'get', 'clusterrolebinding', 'router-router-role', '-o', 'json', '-n', 'default'], None),
mock.call(['oc', 'adm', 'router', 'router', '--expose-metrics=False', '--external-host-insecure=False',
+ "--labels=another-label=val,router=router",
'--ports=80:80,443:443', '--replicas=2', '--selector=type=infra', '--service-account=router',
'--stats-port=1936', '--dry-run=True', '-o', 'json', '-n', 'default'], None),
mock.call(['oc', 'create', '-f', mock.ANY, '-n', 'default'], None),
diff --git a/roles/lib_openshift/src/test/unit/test_oc_secret.py b/roles/lib_openshift/src/test/unit/test_oc_secret.py
index e31393793..09cc4a374 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_secret.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_secret.py
@@ -38,6 +38,7 @@ class OCSecretTest(unittest.TestCase):
'state': 'present',
'namespace': 'default',
'name': 'testsecretname',
+ 'type': 'Opaque',
'contents': [{
'path': "/tmp/somesecret.json",
'data': "{'one': 1, 'two': 2, 'three': 3}",
@@ -74,7 +75,7 @@ class OCSecretTest(unittest.TestCase):
# Making sure our mock was called as we expected
mock_cmd.assert_has_calls([
mock.call(['oc', 'get', 'secrets', 'testsecretname', '-o', 'json', '-n', 'default'], None),
- mock.call(['oc', 'secrets', 'new', 'testsecretname', mock.ANY, '-n', 'default'], None),
+ mock.call(['oc', 'secrets', 'new', 'testsecretname', '--type=Opaque', mock.ANY, '-n', 'default'], None),
])
mock_write.assert_has_calls([
diff --git a/roles/lib_openshift/src/test/unit/test_oc_service.py b/roles/lib_openshift/src/test/unit/test_oc_service.py
index e74c66665..9c21a262f 100755
--- a/roles/lib_openshift/src/test/unit/test_oc_service.py
+++ b/roles/lib_openshift/src/test/unit/test_oc_service.py
@@ -39,6 +39,7 @@ class OCServiceTest(unittest.TestCase):
'selector': None,
'session_affinity': None,
'service_type': None,
+ 'external_ips': None,
'kubeconfig': '/etc/origin/master/admin.kubeconfig',
'debug': False}
@@ -124,6 +125,7 @@ class OCServiceTest(unittest.TestCase):
'selector': {'router': 'router'},
'session_affinity': 'ClientIP',
'service_type': 'ClusterIP',
+ 'external_ips': None,
'kubeconfig': '/etc/origin/master/admin.kubeconfig',
'debug': False}
@@ -303,3 +305,183 @@ class OCServiceTest(unittest.TestCase):
mock_shutil_which.side_effect = lambda _f, path=None: oc_bin
self.assertEqual(locate_oc_binary(), oc_bin)
+
+ @mock.patch('oc_service.Utils.create_tmpfile_copy')
+ @mock.patch('oc_service.OCService._run')
+ def test_create_with_labels(self, mock_cmd, mock_tmpfile_copy):
+ ''' Testing service create with labels '''
+ params = {'name': 'router',
+ 'namespace': 'default',
+ 'ports': {'name': '9000-tcp',
+ 'port': 9000,
+ 'protocol': 'TCP',
+ 'targetPort': 9000},
+ 'state': 'present',
+ 'labels': {'component': 'some_component', 'infra': 'true'},
+ 'clusterip': None,
+ 'portalip': None,
+ 'selector': {'router': 'router'},
+ 'session_affinity': 'ClientIP',
+ 'service_type': 'ClusterIP',
+ 'external_ips': None,
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'debug': False}
+
+ service = '''{
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "router",
+ "namespace": "default",
+ "selfLink": "/api/v1/namespaces/default/services/router",
+ "uid": "fabd2440-e3d8-11e6-951c-0e3dd518cefa",
+ "resourceVersion": "3206",
+ "creationTimestamp": "2017-01-26T15:06:14Z",
+ "labels": {"component": "some_component", "infra": "true"}
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "80-tcp",
+ "protocol": "TCP",
+ "port": 80,
+ "targetPort": 80
+ },
+ {
+ "name": "443-tcp",
+ "protocol": "TCP",
+ "port": 443,
+ "targetPort": 443
+ },
+ {
+ "name": "1936-tcp",
+ "protocol": "TCP",
+ "port": 1936,
+ "targetPort": 1936
+ },
+ {
+ "name": "5000-tcp",
+ "protocol": "TCP",
+ "port": 5000,
+ "targetPort": 5000
+ }
+ ],
+ "selector": {
+ "router": "router"
+ },
+ "clusterIP": "172.30.129.161",
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ },
+ "status": {
+ "loadBalancer": {}
+ }
+ }'''
+ mock_cmd.side_effect = [
+ (1, '', 'Error from server: services "router" not found'),
+ (1, '', 'Error from server: services "router" not found'),
+ (0, service, ''),
+ (0, service, '')
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ results = OCService.run_ansible(params, False)
+
+ self.assertTrue(results['changed'])
+ self.assertTrue(results['results']['returncode'] == 0)
+ self.assertEqual(results['results']['results'][0]['metadata']['name'], 'router')
+ self.assertEqual(results['results']['results'][0]['metadata']['labels'], {"component": "some_component", "infra": "true"})
+
+ @mock.patch('oc_service.Utils.create_tmpfile_copy')
+ @mock.patch('oc_service.OCService._run')
+ def test_create_with_external_ips(self, mock_cmd, mock_tmpfile_copy):
+ ''' Testing service create with external IPs '''
+ params = {'name': 'router',
+ 'namespace': 'default',
+ 'ports': {'name': '9000-tcp',
+ 'port': 9000,
+ 'protocol': 'TCP',
+ 'targetPort': 9000},
+ 'state': 'present',
+ 'labels': {'component': 'some_component', 'infra': 'true'},
+ 'clusterip': None,
+ 'portalip': None,
+ 'selector': {'router': 'router'},
+ 'session_affinity': 'ClientIP',
+ 'service_type': 'ClusterIP',
+ 'external_ips': ['1.2.3.4', '5.6.7.8'],
+ 'kubeconfig': '/etc/origin/master/admin.kubeconfig',
+ 'debug': False}
+
+ service = '''{
+ "kind": "Service",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "router",
+ "namespace": "default",
+ "selfLink": "/api/v1/namespaces/default/services/router",
+ "uid": "fabd2440-e3d8-11e6-951c-0e3dd518cefa",
+ "resourceVersion": "3206",
+ "creationTimestamp": "2017-01-26T15:06:14Z",
+ "labels": {"component": "some_component", "infra": "true"}
+ },
+ "spec": {
+ "ports": [
+ {
+ "name": "80-tcp",
+ "protocol": "TCP",
+ "port": 80,
+ "targetPort": 80
+ },
+ {
+ "name": "443-tcp",
+ "protocol": "TCP",
+ "port": 443,
+ "targetPort": 443
+ },
+ {
+ "name": "1936-tcp",
+ "protocol": "TCP",
+ "port": 1936,
+ "targetPort": 1936
+ },
+ {
+ "name": "5000-tcp",
+ "protocol": "TCP",
+ "port": 5000,
+ "targetPort": 5000
+ }
+ ],
+ "selector": {
+ "router": "router"
+ },
+ "clusterIP": "172.30.129.161",
+ "externalIPs": ["1.2.3.4", "5.6.7.8"],
+ "type": "ClusterIP",
+ "sessionAffinity": "None"
+ },
+ "status": {
+ "loadBalancer": {}
+ }
+ }'''
+ mock_cmd.side_effect = [
+ (1, '', 'Error from server: services "router" not found'),
+ (1, '', 'Error from server: services "router" not found'),
+ (0, service, ''),
+ (0, service, '')
+ ]
+
+ mock_tmpfile_copy.side_effect = [
+ '/tmp/mocked_kubeconfig',
+ ]
+
+ results = OCService.run_ansible(params, False)
+
+ self.assertTrue(results['changed'])
+ self.assertTrue(results['results']['returncode'] == 0)
+ self.assertEqual(results['results']['results'][0]['metadata']['name'], 'router')
+ self.assertEqual(results['results']['results'][0]['metadata']['labels'], {"component": "some_component", "infra": "true"})
+ self.assertEqual(results['results']['results'][0]['spec']['externalIPs'], ["1.2.3.4", "5.6.7.8"])
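
The four-element `side_effect` lists in these tests model the module's call order: two failing `oc get` probes while the service is absent, then the create and the follow-up fetch both returning the service JSON. A compact illustration of the pattern with `unittest.mock`:

```
from unittest import mock

run = mock.Mock(side_effect=[
    (1, '', 'not found'),            # oc get -> absent
    (1, '', 'not found'),            # second probe -> still absent
    (0, '{"kind": "Service"}', ''),  # oc create
    (0, '{"kind": "Service"}', ''),  # re-fetch after create
])
print([run()[0] for _ in range(4)])  # [1, 1, 0, 0]
```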
diff --git a/roles/nuage_master/defaults/main.yaml b/roles/nuage_master/defaults/main.yaml
deleted file mode 100644
index c90f4f443..000000000
--- a/roles/nuage_master/defaults/main.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-nuage_master_cspadminpasswd: ""
-nuage_master_adminusername: admin
-nuage_master_adminuserpasswd: admin
diff --git a/roles/nuage_master/tasks/main.yaml b/roles/nuage_master/tasks/main.yaml
index fefd28bbd..4f8adb63e 100644
--- a/roles/nuage_master/tasks/main.yaml
+++ b/roles/nuage_master/tasks/main.yaml
@@ -33,6 +33,14 @@
- include: certificates.yml
+- name: Install Nuage VSD user certificate
+ become: yes
+ copy: src="{{ vsd_user_cert_file }}" dest="{{ cert_output_dir }}/{{ vsd_user_cert_file | basename }}"
+
+- name: Install Nuage VSD user key
+ become: yes
+ copy: src="{{ vsd_user_key_file }}" dest="{{ cert_output_dir }}/{{ vsd_user_key_file | basename }}"
+
- name: Create nuage-openshift-monitor.yaml
become: yes
template: src=nuage-openshift-monitor.j2 dest=/usr/share/nuage-openshift-monitor/nuage-openshift-monitor.yaml owner=root mode=0644
diff --git a/roles/nuage_master/templates/nuage-openshift-monitor.j2 b/roles/nuage_master/templates/nuage-openshift-monitor.j2
index de2a97e37..e077128a4 100644
--- a/roles/nuage_master/templates/nuage-openshift-monitor.j2
+++ b/roles/nuage_master/templates/nuage-openshift-monitor.j2
@@ -15,12 +15,10 @@ vspVersion: {{ vsp_version }}
enterpriseName: {{ enterprise }}
# Name of the domain in which pods will reside
domainName: {{ domain }}
-# CSP admin user's password
-cspAdminPassword: {{ nuage_master_cspadminpasswd }}
-# Enterprise admin user name
-enterpriseAdminUser: {{ nuage_master_adminusername }}
-# Enterprise admin password
-enterpriseAdminPassword: {{ nuage_master_adminuserpasswd }}
+# VSD generated user certificate file location on master node
+userCertificateFile: {{ cert_output_dir }}/{{ vsd_user_cert_file | basename }}
+# VSD generated user key file location on master node
+userKeyFile: {{ cert_output_dir }}/{{ vsd_user_key_file | basename }}
# Location where logs should be saved
log_dir: {{ nuage_mon_rest_server_logdir }}
# Monitor rest server parameters
diff --git a/roles/nuage_node/tasks/main.yaml b/roles/nuage_node/tasks/main.yaml
index d82dd36a4..928f9e2e6 100644
--- a/roles/nuage_node/tasks/main.yaml
+++ b/roles/nuage_node/tasks/main.yaml
@@ -20,6 +20,21 @@
become: yes
yum: name={{ plugin_rpm }} state=present
+- name: Ensure CNI conf dir exists
+ become: yes
+ file: path="{{ cni_conf_dir }}" state=directory
+
+- name: Ensure OpenShift CNI bin dir exists
+ become: yes
+ file: path="{{ cni_bin_dir }}" state=directory
+
+- name: Install CNI loopback plugin
+ become: yes
+ copy:
+ src: "{{ k8s_cni_loopback_plugin }}"
+ dest: "{{ cni_bin_dir }}/{{ k8s_cni_loopback_plugin | basename }}"
+ mode: 0755
+
- name: Copy the certificates and keys
become: yes
copy: src="/tmp/{{ item }}" dest="{{ vsp_openshift_dir }}/{{ item }}"
diff --git a/roles/nuage_node/templates/vsp-openshift.j2 b/roles/nuage_node/templates/vsp-openshift.j2
index d3c0a122a..9fab53906 100644
--- a/roles/nuage_node/templates/vsp-openshift.j2
+++ b/roles/nuage_node/templates/vsp-openshift.j2
@@ -8,6 +8,8 @@ CACert: {{ ca_cert }}
enterpriseName: {{ enterprise }}
# Name of the domain in which pods will reside
domainName: {{ domain }}
+# Name of the VSD user in admin group
+vsdUser: {{ vsduser }}
# IP address and port number of master API server
masterApiServer: {{ api_server }}
# REST server URL
diff --git a/roles/nuage_node/vars/main.yaml b/roles/nuage_node/vars/main.yaml
index 7b789152f..4cf68411f 100644
--- a/roles/nuage_node/vars/main.yaml
+++ b/roles/nuage_node/vars/main.yaml
@@ -19,4 +19,7 @@ nuage_plugin_rest_client_crt_dir: "{{ nuage_ca_master_crt_dir }}/{{ ansible_node
nuage_ca_master_plugin_key: "{{ nuage_plugin_rest_client_crt_dir }}/nuageMonClient.key"
nuage_ca_master_plugin_crt: "{{ nuage_plugin_rest_client_crt_dir }}/nuageMonClient.crt"
+cni_conf_dir: "/etc/cni/net.d/"
+cni_bin_dir: "/opt/cni/bin/"
+
nuage_plugin_crt_dir: /usr/share/vsp-openshift
diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml
index c7b906949..b9a7ec32f 100644
--- a/roles/openshift_ca/tasks/main.yml
+++ b/roles/openshift_ca/tasks/main.yml
@@ -108,6 +108,38 @@
delegate_to: "{{ openshift_ca_host }}"
run_once: true
+- name: Test local loopback context
+ command: >
+ {{ hostvars[openshift_ca_host].openshift.common.client_binary }} config view
+ --config={{ openshift_master_loopback_config }}
+ changed_when: false
+ register: loopback_config
+ delegate_to: "{{ openshift_ca_host }}"
+ run_once: true
+
+- name: Generate the loopback master client config
+ command: >
+ {{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config
+ {% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %}
+ --certificate-authority {{ named_ca_certificate }}
+ {% endfor %}
+ --certificate-authority={{ openshift_ca_cert }}
+ --client-dir={{ openshift_ca_config_dir }}
+ --groups=system:masters,system:openshift-master
+ --master={{ hostvars[openshift_ca_host].openshift.master.loopback_api_url }}
+ --public-master={{ hostvars[openshift_ca_host].openshift.master.loopback_api_url }}
+ --signer-cert={{ openshift_ca_cert }}
+ --signer-key={{ openshift_ca_key }}
+ --signer-serial={{ openshift_ca_serial }}
+ --user=system:openshift-master
+ --basename=openshift-master
+ {% if openshift_version | oo_version_gte_3_5_or_1_5(openshift.common.deployment_type) | bool %}
+ --expire-days={{ openshift_master_cert_expire_days }}
+ {% endif %}
+ when: loopback_context_string not in loopback_config.stdout
+ delegate_to: "{{ openshift_ca_host }}"
+ run_once: true
+
- name: Restore original serviceaccount keys
copy:
src: "{{ item }}.keep"
diff --git a/roles/openshift_ca/vars/main.yml b/roles/openshift_ca/vars/main.yml
index a32e385ec..d04c1766d 100644
--- a/roles/openshift_ca/vars/main.yml
+++ b/roles/openshift_ca/vars/main.yml
@@ -4,3 +4,6 @@ openshift_ca_cert: "{{ openshift_ca_config_dir }}/ca.crt"
openshift_ca_key: "{{ openshift_ca_config_dir }}/ca.key"
openshift_ca_serial: "{{ openshift_ca_config_dir }}/ca.serial.txt"
openshift_version: "{{ openshift_pkg_version | default('') }}"
+
+openshift_master_loopback_config: "{{ openshift_ca_config_dir }}/openshift-master.kubeconfig"
+loopback_context_string: "current-context: {{ openshift.master.loopback_context_name }}"
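
The new vars give the CA tasks an idempotence guard: the loopback client config is regenerated only when the `oc config view` output for the master kubeconfig lacks the expected `current-context:` line. A sketch of the guard under that assumption:

```
def needs_loopback_regen(config_view_output, loopback_context_name):
    # Mirrors the playbook's `when:` condition on the registered output
    loopback_context_string = "current-context: {}".format(loopback_context_name)
    return loopback_context_string not in config_view_output

print(needs_loopback_regen(
    "current-context: default/master:8443/system:openshift-master",
    "default/master:8443/system:openshift-master"))  # False
```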
diff --git a/roles/openshift_certificate_expiry/README.md b/roles/openshift_certificate_expiry/README.md
index 107e27f89..f19a421cb 100644
--- a/roles/openshift_certificate_expiry/README.md
+++ b/roles/openshift_certificate_expiry/README.md
@@ -54,7 +54,7 @@ included in this role, or you can [read on below for more examples](#more-exampl
to help you craft your own.
```
-$ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/easy-mode.yaml
+$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml
```
Using the `easy-mode.yaml` playbook will produce:
@@ -65,7 +65,7 @@ Using the `easy-mode.yaml` playbook will produce:
> **Note:** If you are running from an RPM install use
-> `/usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/easy-mode.yaml`
+> `/usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml`
> instead
## Run from a container
@@ -80,7 +80,7 @@ There are several [examples](../../examples/README.md) in the `examples` directo
## More Example Playbooks
> **Note:** These Playbooks are available to run directly out of the
-> [/playbooks/certificate_expiry/](../../playbooks/certificate_expiry/) directory.
+> [/playbooks/byo/openshift-checks/certificate_expiry/](../../playbooks/byo/openshift-checks/certificate_expiry/) directory.
### Default behavior
@@ -99,14 +99,14 @@ This playbook just invokes the certificate expiration check role with default op
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/default.yaml
+$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/default.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/default.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/default.yaml
```
-> [View This Playbook](../../playbooks/certificate_expiry/default.yaml)
+> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/default.yaml)
### Easy mode
@@ -130,14 +130,14 @@ certificates (healthy or not) are included in the results:
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/easy-mode.yaml
+$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/easy-mode.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml
```
-> [View This Playbook](../../playbooks/certificate_expiry/easy-mode.yaml)
+> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/easy-mode.yaml)
### Easy mode and upload reports to masters
@@ -193,14 +193,14 @@ options via environment variables:
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/easy-mode-upload.yaml
+$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/easy-mode-upload.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml
```
-> [View This Playbook](../../playbooks/certificate_expiry/easy-mode-upload.yaml)
+> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/easy-mode-upload.yaml)
### Generate HTML and JSON artifacts in their default paths
@@ -219,14 +219,14 @@ $ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/ce
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/html_and_json_default_paths.yaml
+$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/html_and_json_default_paths.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml
```
-> [View This Playbook](../../playbooks/certificate_expiry/html_and_json_default_paths.yaml)
+> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/html_and_json_default_paths.yaml)
### Generate HTML and JSON reports in a custom path
@@ -250,14 +250,14 @@ This example customizes the report generation path to point to a specific path (
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/html_and_json_timestamp.yaml
+$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/html_and_json_timestamp.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml
```
-> [View This Playbook](../../playbooks/certificate_expiry/html_and_json_timestamp.yaml)
+> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/html_and_json_timestamp.yaml)
### Long warning window
@@ -278,14 +278,14 @@ the module out):
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/longer_warning_period.yaml
+$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/longer_warning_period.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml
```
-> [View This Playbook](../../playbooks/certificate_expiry/longer_warning_period.yaml)
+> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/longer_warning_period.yaml)
### Long warning window and JSON report
@@ -307,14 +307,14 @@ the module out) and save the results as a JSON file:
**From git:**
```
-$ ansible-playbook -v -i HOSTS playbooks/certificate_expiry/longer-warning-period-json-results.yaml
+$ ansible-playbook -v -i HOSTS playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml
```
**From openshift-ansible-playbooks rpm:**
```
-$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/certificate_expiry/longer-warning-period-json-results.yaml
+$ ansible-playbook -v -i HOSTS /usr/share/ansible/openshift-ansible/playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml
```
-> [View This Playbook](../../playbooks/certificate_expiry/longer-warning-period-json-results.yaml)
+> [View This Playbook](../../playbooks/byo/openshift-checks/certificate_expiry/longer-warning-period-json-results.yaml)
diff --git a/roles/openshift_docker_facts/tasks/main.yml b/roles/openshift_docker_facts/tasks/main.yml
index 350512452..95e94171d 100644
--- a/roles/openshift_docker_facts/tasks/main.yml
+++ b/roles/openshift_docker_facts/tasks/main.yml
@@ -17,6 +17,9 @@
hosted_registry_insecure: "{{ openshift_docker_hosted_registry_insecure | default(openshift.docker.hosted_registry_insecure | default(False)) }}"
hosted_registry_network: "{{ openshift_docker_hosted_registry_network | default(None) }}"
use_system_container: "{{ openshift_docker_use_system_container | default(False) }}"
+ - role: node
+ local_facts:
+ sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
- set_fact:
docker_additional_registries: "{{ openshift.docker.additional_registries
diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh
index c7e51bbfc..f3f270c40 100755
--- a/roles/openshift_examples/examples-sync.sh
+++ b/roles/openshift_examples/examples-sync.sh
@@ -7,8 +7,7 @@
XPAAS_VERSION=ose-v1.3.6
ORIGIN_VERSION=${1:-v3.6}
-RHAMP_TAG=1.0.0.GA
-RHAMP_TEMPLATE=https://raw.githubusercontent.com/3scale/rhamp-openshift-templates/${RHAMP_TAG}/apicast-gateway/apicast-gateway-template.yml
+RHAMP_TAG=2.0.0.GA
EXAMPLES_BASE=$(pwd)/files/examples/${ORIGIN_VERSION}
find ${EXAMPLES_BASE} -name '*.json' -delete
TEMP=`mktemp -d`
@@ -17,9 +16,11 @@ pushd $TEMP
wget https://github.com/openshift/origin/archive/master.zip -O origin-master.zip
wget https://github.com/jboss-fuse/application-templates/archive/GA.zip -O fis-GA.zip
wget https://github.com/jboss-openshift/application-templates/archive/${XPAAS_VERSION}.zip -O application-templates-master.zip
+wget https://github.com/3scale/rhamp-openshift-templates/archive/${RHAMP_TAG}.zip -O amp.zip
unzip origin-master.zip
unzip application-templates-master.zip
unzip fis-GA.zip
+unzip amp.zip
mv origin-master/examples/db-templates/* ${EXAMPLES_BASE}/db-templates/
mv origin-master/examples/quickstarts/* ${EXAMPLES_BASE}/quickstart-templates/
mv origin-master/examples/jenkins/jenkins-*template.json ${EXAMPLES_BASE}/quickstart-templates/
@@ -30,15 +31,11 @@ mv application-templates-${XPAAS_VERSION}/jboss-image-streams.json ${EXAMPLES_BA
mv application-templates-GA/fis-image-streams.json ${EXAMPLES_BASE}/xpaas-streams/fis-image-streams.json
mv application-templates-GA/quickstarts/* ${EXAMPLES_BASE}/xpaas-templates/
find application-templates-${XPAAS_VERSION}/ -name '*.json' ! -wholename '*secret*' ! -wholename '*demo*' -exec mv {} ${EXAMPLES_BASE}/xpaas-templates/ \;
+find 3scale-amp-openshift-templates-${RHAMP_TAG}/ -name '*.yml' -exec mv {} ${EXAMPLES_BASE}/quickstart-templates/ \;
popd
wget https://raw.githubusercontent.com/redhat-developer/s2i-dotnetcore/master/dotnet_imagestreams.json -O ${EXAMPLES_BASE}/image-streams/dotnet_imagestreams.json
wget https://raw.githubusercontent.com/redhat-developer/s2i-dotnetcore/master/templates/dotnet-example.json -O ${EXAMPLES_BASE}/quickstart-templates/dotnet-example.json
wget https://raw.githubusercontent.com/redhat-developer/s2i-dotnetcore/master/templates/dotnet-pgsql-persistent.json -O ${EXAMPLES_BASE}/quickstart-templates/dotnet-pgsql-persistent.json
-wget ${RHAMP_TEMPLATE} -O ${EXAMPLES_BASE}/quickstart-templates/apicast-gateway-template.yml
-wget https://raw.githubusercontent.com/openshift/origin-metrics/master/metrics.yaml -O ../openshift_hosted_templates/files/${ORIGIN_VERSION}/origin/metrics-deployer.yaml
-wget https://raw.githubusercontent.com/openshift/origin-metrics/enterprise/metrics.yaml -O ../openshift_hosted_templates/files/${ORIGIN_VERSION}/enterprise/metrics-deployer.yaml
-wget https://raw.githubusercontent.com/openshift/origin-aggregated-logging/master/deployer/deployer.yaml -O ../openshift_hosted_templates/files/${ORIGIN_VERSION}/origin/logging-deployer.yaml
-wget https://raw.githubusercontent.com/openshift/origin-aggregated-logging/enterprise/deployment/deployer.yaml -O ../openshift_hosted_templates/files/${ORIGIN_VERSION}/enterprise/logging-deployer.yaml
git diff files/examples
diff --git a/roles/openshift_examples/files/examples/v1.4/cfme-templates/jboss-middleware-manager-pv-example.yaml b/roles/openshift_examples/files/examples/v1.4/cfme-templates/jboss-middleware-manager-pv-example.yaml
new file mode 100644
index 000000000..240f6cbdf
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/cfme-templates/jboss-middleware-manager-pv-example.yaml
@@ -0,0 +1,58 @@
+#
+# Copyright 2016-2017 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: v1
+kind: Template
+parameters:
+- name: HAWKULAR_SERVICES_DATA_LIMIT
+ description: Maximum amount of data used by the hawkular-services container (mostly logging)
+ displayName: Hawkular Services Container Data Limit
+ value: 1Gi
+- name: CASSANDRA_DATA_LIMIT
+ description: Maximum amount of data used by the Cassandra container
+ displayName: Cassandra Container Data Limit
+ value: 2Gi
+
+objects:
+- apiVersion: v1
+ kind: PersistentVolume
+ metadata:
+ name: h-services-pv
+ labels:
+ type: h-services
+ spec:
+ capacity:
+ storage: ${HAWKULAR_SERVICES_DATA_LIMIT}
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Retain
+ hostPath:
+ path: /tmp/pv-services
+- apiVersion: v1
+ kind: PersistentVolume
+ metadata:
+ name: cassandra-pv
+ labels:
+ type: cassandra
+ spec:
+ capacity:
+ storage: ${CASSANDRA_DATA_LIMIT}
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Retain
+ hostPath:
+ path: /tmp/pv-cassandra
diff --git a/roles/openshift_examples/files/examples/v1.4/cfme-templates/jboss-middleware-manager-template.yaml b/roles/openshift_examples/files/examples/v1.4/cfme-templates/jboss-middleware-manager-template.yaml
new file mode 100644
index 000000000..fef86ff5a
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.4/cfme-templates/jboss-middleware-manager-template.yaml
@@ -0,0 +1,254 @@
+#
+# Copyright 2016-2017 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: v1
+kind: Template
+metadata:
+ name: hawkular-services
+ annotations:
+ openshift.io/display-name: Hawkular Services
+ description: Hawkular-Services all-in-one (including Hawkular Metrics, Hawkular Alerts and Hawkular Inventory).
+ iconClass: icon-wildfly
+ tags: hawkular,hawkular-services,metrics,alerts,manageiq,cassandra
+
+parameters:
+- name: HAWKULAR_SERVICES_IMAGE
+ description: Docker image to use for hawkular-services.
+ displayName: Hawkular Services Docker Image
+ value: registry.access.redhat.com/jboss-mm-7-tech-preview/middleware-manager:latest
+- name: CASSANDRA_IMAGE
+ description: Docker image to use for the Cassandra node.
+ displayName: Cassandra Docker Image
+ value: registry.access.redhat.com/openshift3/metrics-cassandra:3.4.0
+- name: CASSANDRA_MEMORY_LIMIT
+ description: Maximum amount of memory for Cassandra container.
+ displayName: Cassandra Memory Limit
+ value: 2Gi
+- name: CASSANDRA_DATA_LIMIT
+ description: Maximum amount of data used by the Cassandra container.
+ displayName: Cassandra Container Data Limit
+ value: 2Gi
+- name: HAWKULAR_SERVICES_DATA_LIMIT
+ description: Maximum amount of data used by the hawkular-services container (mostly logging).
+ displayName: Hawkular Services Container Data Limit
+ value: 1Gi
+- name: ROUTE_NAME
+ description: A public route with this name will be created.
+ displayName: Route Name
+ value: hawkular-services
+- name: ROUTE_HOSTNAME
+ description: Under this hostname the Hawkular Services will be accessible; if left blank, a value will be defaulted.
+ displayName: Hostname
+- name: HAWKULAR_USER
+ description: Username that is used for accessing the Hawkular Services; if left blank, a value will be generated.
+ displayName: Hawkular User
+ from: '[a-zA-Z0-9]{16}'
+ generate: expression
+- name: HAWKULAR_PASSWORD
+ description: Password that is used for accessing the Hawkular Services; if left blank, a value will be generated.
+ displayName: Hawkular Password
+ from: '[a-zA-Z0-9]{16}'
+ generate: expression
+labels:
+ template: hawkular-services
+message: Credentials for hawkular-services are ${HAWKULAR_USER}:${HAWKULAR_PASSWORD}
+
+objects:
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Exposes and load balances the application pods
+ service.alpha.openshift.io/dependencies: '[{"name":"hawkular-cassandra","namespace":"","kind":"Service"}]'
+ name: hawkular-services
+ spec:
+ ports:
+ - name: http-8080-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: admin-9990-tcp
+ port: 9990
+ protocol: TCP
+ targetPort: 9990
+ selector:
+ name: hawkular-services
+ type: ClusterIP
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Cassandra Service
+ name: hawkular-cassandra
+ spec:
+ ports:
+ - name: cql-9042-tcp
+ port: 9042
+ protocol: TCP
+ targetPort: 9042
+ selector:
+ name: hawkular-cassandra
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: ${ROUTE_NAME}
+ spec:
+ host: ${ROUTE_HOSTNAME}
+ to:
+ kind: Service
+ name: hawkular-services
+ port:
+ targetPort: http-8080-tcp
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ annotations:
+ description: Defines how to deploy the application server
+ name: hawkular-services
+ spec:
+ replicas: 1
+ selector:
+ name: hawkular-services
+ strategy:
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: hawkular-services
+ spec:
+ containers:
+ - image: ${HAWKULAR_SERVICES_IMAGE}
+ env:
+ - name: HAWKULAR_BACKEND
+ value: remote
+ - name: CASSANDRA_NODES
+ value: hawkular-cassandra
+ - name: HAWKULAR_USER
+ value: ${HAWKULAR_USER}
+ - name: HAWKULAR_PASSWORD
+ value: ${HAWKULAR_PASSWORD}
+ imagePullPolicy: IfNotPresent
+ name: hawkular-services
+ volumeMounts:
+ - name: h-services-data
+ mountPath: /var/opt/hawkular
+ ports:
+ - containerPort: 8080
+ - containerPort: 9990
+ livenessProbe:
+ exec:
+ command:
+ - /opt/hawkular/bin/ready.sh
+ initialDelaySeconds: 180
+ timeoutSeconds: 3
+ readinessProbe:
+ exec:
+ command:
+ - /opt/hawkular/bin/ready.sh
+ initialDelaySeconds: 120
+ timeoutSeconds: 3
+ periodSeconds: 5
+ successThreshold: 1
+ failureThreshold: 12
+ resources:
+ requests:
+ memory: 1024Mi
+ cpu: 2000m
+ dnsPolicy: ClusterFirst
+ restartPolicy: Always
+ volumes:
+ - name: h-services-data
+ persistentVolumeClaim:
+ claimName: h-services-pvc
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ annotations:
+ description: Defines how to deploy Cassandra
+ name: hawkular-cassandra
+ spec:
+ replicas: 1
+ selector:
+ name: hawkular-cassandra
+ strategy:
+ type: Recreate
+ rollingParams:
+ timeoutSeconds: 300
+ template:
+ metadata:
+ labels:
+ name: hawkular-cassandra
+ spec:
+ containers:
+ - image: ${CASSANDRA_IMAGE}
+ imagePullPolicy: Always
+ name: hawkular-cassandra
+ env:
+ - name: DATA_VOLUME
+ value: /var/lib/cassandra
+ volumeMounts:
+ - name: cassandra-data
+ mountPath: /var/lib/cassandra
+ ports:
+ - containerPort: 9042
+ - containerPort: 9160
+ readinessProbe:
+ exec:
+ command: ['nodetool', 'status']
+ initialDelaySeconds: 30
+ timeoutSeconds: 10
+ periodSeconds: 15
+ successThreshold: 1
+ failureThreshold: 3
+ livenessProbe:
+ exec:
+ command: ['nodetool', 'status']
+ initialDelaySeconds: 300
+ timeoutSeconds: 10
+ periodSeconds: 15
+ successThreshold: 1
+ failureThreshold: 3
+ resources:
+ limits:
+ memory: ${CASSANDRA_MEMORY_LIMIT}
+ volumes:
+ - name: cassandra-data
+ persistentVolumeClaim:
+ claimName: cassandra-pvc
+
+- apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: h-services-pvc
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 1Gi
+- apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: cassandra-pvc
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 1Gi
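+
+# A minimal usage sketch (assumed workflow, not part of the template itself):
+#   oc process -f <this-template-file> -p ROUTE_HOSTNAME=hawkular.example.com | oc create -f -
+# The hostname is an illustrative placeholder; parameters that carry
+# generate: expression (HAWKULAR_USER, HAWKULAR_PASSWORD) are filled in
+# automatically when left blank.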
diff --git a/roles/openshift_examples/files/examples/v1.5/cfme-templates/jboss-middleware-manager-pv-example.yaml b/roles/openshift_examples/files/examples/v1.5/cfme-templates/jboss-middleware-manager-pv-example.yaml
new file mode 100644
index 000000000..240f6cbdf
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/cfme-templates/jboss-middleware-manager-pv-example.yaml
@@ -0,0 +1,58 @@
+#
+# Copyright 2016-2017 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: v1
+kind: Template
+parameters:
+- name: HAWKULAR_SERVICES_DATA_LIMIT
+ description: Maximum amount of data used by the hawkular-services container (mostly logging)
+ displayName: Hawkular Services Container Data Limit
+ value: 1Gi
+- name: CASSANDRA_DATA_LIMIT
+ description: Maximum amount of data used by the Cassandra container
+ displayName: Cassandra Container Data Limit
+ value: 2Gi
+
+objects:
+- apiVersion: v1
+ kind: PersistentVolume
+ metadata:
+ name: h-services-pv
+ labels:
+ type: h-services
+ spec:
+ capacity:
+ storage: ${HAWKULAR_SERVICES_DATA_LIMIT}
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Retain
+ hostPath:
+ path: /tmp/pv-services
+- apiVersion: v1
+ kind: PersistentVolume
+ metadata:
+ name: cassandra-pv
+ labels:
+ type: cassandra
+ spec:
+ capacity:
+ storage: ${CASSANDRA_DATA_LIMIT}
+ accessModes:
+ - ReadWriteOnce
+ persistentVolumeReclaimPolicy: Retain
+ hostPath:
+ path: /tmp/pv-cassandra
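+
+# Note: these PersistentVolumes use hostPath storage under /tmp, which is only
+# suitable for single-node evaluation setups; a production cluster would
+# typically back the claims with NFS, Ceph, or another shared-storage plugin.
+# A minimal usage sketch (assumed workflow):
+#   oc process -f jboss-middleware-manager-pv-example.yaml | oc create -f -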
diff --git a/roles/openshift_examples/files/examples/v1.5/cfme-templates/jboss-middleware-manager-template.yaml b/roles/openshift_examples/files/examples/v1.5/cfme-templates/jboss-middleware-manager-template.yaml
new file mode 100644
index 000000000..bbc0c7044
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v1.5/cfme-templates/jboss-middleware-manager-template.yaml
@@ -0,0 +1,254 @@
+#
+# Copyright 2016-2017 Red Hat, Inc. and/or its affiliates
+# and other contributors as indicated by the @author tags.
+#
+# Licensed under the Apache License, Version 2.0 (the "License");
+# you may not use this file except in compliance with the License.
+# You may obtain a copy of the License at
+#
+# http://www.apache.org/licenses/LICENSE-2.0
+#
+# Unless required by applicable law or agreed to in writing, software
+# distributed under the License is distributed on an "AS IS" BASIS,
+# WITHOUT WARRANTIES OR CONDITIONS OF ANY KIND, either express or implied.
+# See the License for the specific language governing permissions and
+# limitations under the License.
+#
+
+apiVersion: v1
+kind: Template
+metadata:
+ name: hawkular-services
+ annotations:
+ openshift.io/display-name: Hawkular Services
+ description: Hawkular-Services all-in-one (including Hawkular Metrics, Hawkular Alerts and Hawkular Inventory).
+ iconClass: icon-wildfly
+ tags: hawkular,hawkular-services,metrics,alerts,manageiq,cassandra
+
+parameters:
+- name: HAWKULAR_SERVICES_IMAGE
+ description: Docker image to use for hawkular-services.
+ displayName: Hawkular Services Docker Image
+ value: registry.access.redhat.com/jboss-mm-7-tech-preview/middleware-manager:latest
+- name: CASSANDRA_IMAGE
+ description: Docker image to use for the Cassandra node.
+ displayName: Cassandra Docker Image
+ value: registry.access.redhat.com/openshift3/metrics-cassandra:3.5.0
+- name: CASSANDRA_MEMORY_LIMIT
+ description: Maximum amount of memory for the Cassandra container.
+ displayName: Cassandra Memory Limit
+ value: 2Gi
+- name: CASSANDRA_DATA_LIMIT
+ description: Maximum amount of data used by the Cassandra container.
+ displayName: Cassandra Container Data Limit
+ value: 2Gi
+- name: HAWKULAR_SERVICES_DATA_LIMIT
+ description: Maximum amount of data used by the hawkular-services container (mostly logging).
+ displayName: Hawkular Services Container Data Limit
+ value: 1Gi
+- name: ROUTE_NAME
+ description: A public route with this name will be created.
+ displayName: Route Name
+ value: hawkular-services
+- name: ROUTE_HOSTNAME
+ description: The hostname under which the Hawkular Services will be accessible; if left blank, a value will be defaulted.
+ displayName: Hostname
+- name: HAWKULAR_USER
+ description: Username that is used for accessing the Hawkular Services; if left blank, a value will be generated.
+ displayName: Hawkular User
+ from: '[a-zA-Z0-9]{16}'
+ generate: expression
+- name: HAWKULAR_PASSWORD
+ description: Password that is used for accessing the Hawkular Services; if left blank, a value will be generated.
+ displayName: Hawkular Password
+ from: '[a-zA-Z0-9]{16}'
+ generate: expression
+labels:
+ template: hawkular-services
+message: Credentials for hawkular-services are ${HAWKULAR_USER}:${HAWKULAR_PASSWORD}
+
+objects:
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Exposes and load balances the application pods
+ service.alpha.openshift.io/dependencies: '[{"name":"hawkular-cassandra","namespace":"","kind":"Service"}]'
+ name: hawkular-services
+ spec:
+ ports:
+ - name: http-8080-tcp
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: admin-9990-tcp
+ port: 9990
+ protocol: TCP
+ targetPort: 9990
+ selector:
+ name: hawkular-services
+ type: ClusterIP
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Cassandra Service
+ name: hawkular-cassandra
+ spec:
+ ports:
+ - name: cql-9042-tcp
+ port: 9042
+ protocol: TCP
+ targetPort: 9042
+ selector:
+ name: hawkular-cassandra
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: ${ROUTE_NAME}
+ spec:
+ host: ${ROUTE_HOSTNAME}
+ to:
+ kind: Service
+ name: hawkular-services
+ port:
+ targetPort: http-8080-tcp
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ annotations:
+ description: Defines how to deploy the application server
+ name: hawkular-services
+ spec:
+ replicas: 1
+ selector:
+ name: hawkular-services
+ strategy:
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: hawkular-services
+ spec:
+ containers:
+ - image: ${HAWKULAR_SERVICES_IMAGE}
+ env:
+ - name: HAWKULAR_BACKEND
+ value: remote
+ - name: CASSANDRA_NODES
+ value: hawkular-cassandra
+ - name: HAWKULAR_USER
+ value: ${HAWKULAR_USER}
+ - name: HAWKULAR_PASSWORD
+ value: ${HAWKULAR_PASSWORD}
+ imagePullPolicy: IfNotPresent
+ name: hawkular-services
+ volumeMounts:
+ - name: h-services-data
+ mountPath: /var/opt/hawkular
+ ports:
+ - containerPort: 8080
+ - containerPort: 9990
+ livenessProbe:
+ exec:
+ command:
+ - /opt/hawkular/bin/ready.sh
+ initialDelaySeconds: 180
+ timeoutSeconds: 3
+ readinessProbe:
+ exec:
+ command:
+ - /opt/hawkular/bin/ready.sh
+ initialDelaySeconds: 120
+ timeoutSeconds: 3
+ periodSeconds: 5
+ successThreshold: 1
+ failureThreshold: 12
+ resources:
+ requests:
+ memory: 1024Mi
+ cpu: 2000m
+ dnsPolicy: ClusterFirst
+ restartPolicy: Always
+ volumes:
+ - name: h-services-data
+ persistentVolumeClaim:
+ claimName: h-services-pvc
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ annotations:
+ description: Defines how to deploy Cassandra
+ name: hawkular-cassandra
+ spec:
+ replicas: 1
+ selector:
+ name: hawkular-cassandra
+ strategy:
+ type: Recreate
+ rollingParams:
+ timeoutSeconds: 300
+ template:
+ metadata:
+ labels:
+ name: hawkular-cassandra
+ spec:
+ containers:
+ - image: ${CASSANDRA_IMAGE}
+ imagePullPolicy: Always
+ name: hawkular-cassandra
+ env:
+ - name: DATA_VOLUME
+ value: /var/lib/cassandra
+ volumeMounts:
+ - name: cassandra-data
+ mountPath: /var/lib/cassandra
+ ports:
+ - containerPort: 9042
+ - containerPort: 9160
+ readinessProbe:
+ exec:
+ command: ['nodetool', 'status']
+ initialDelaySeconds: 30
+ timeoutSeconds: 10
+ periodSeconds: 15
+ successThreshold: 1
+ failureThreshold: 3
+ livenessProbe:
+ exec:
+ command: ['nodetool', 'status']
+ initialDelaySeconds: 300
+ timeoutSeconds: 10
+ periodSeconds: 15
+ successThreshold: 1
+ failureThreshold: 3
+ resources:
+ limits:
+ memory: ${CASSANDRA_MEMORY_LIMIT}
+ volumes:
+ - name: cassandra-data
+ persistentVolumeClaim:
+ claimName: cassandra-pvc
+
+- apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: h-services-pvc
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 1Gi
+- apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: cassandra-pvc
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 1Gi
diff --git a/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-centos7.json
index a81dbb654..2583018b7 100644
--- a/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-centos7.json
+++ b/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-centos7.json
@@ -103,7 +103,7 @@
},
"from": {
"kind": "ImageStreamTag",
- "name": "4"
+ "name": "6"
}
},
{
@@ -137,6 +137,22 @@
"kind": "DockerImage",
"name": "centos/nodejs-4-centos7:latest"
}
+ },
+ {
+ "name": "6",
+ "annotations": {
+ "openshift.io/display-name": "Node.js 6",
+ "description": "Build and run Node.js 6 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/6/README.md.",
+ "iconClass": "icon-nodejs",
+ "tags": "builder,nodejs",
+ "supports":"nodejs:6,nodejs",
+ "version": "6",
+ "sampleRepo": "https://github.com/openshift/nodejs-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/nodejs-6-centos7:latest"
+ }
}
]
}
@@ -407,7 +423,7 @@
"iconClass": "icon-wildfly",
"tags": "builder,wildfly,java",
"supports":"jee,java",
- "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ "sampleRepo": "https://github.com/openshift/openshift-jee-sample.git"
},
"from": {
"kind": "ImageStreamTag",
@@ -423,7 +439,7 @@
"tags": "builder,wildfly,java",
"supports":"wildfly:8.1,jee,java",
"version": "8.1",
- "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ "sampleRepo": "https://github.com/openshift/openshift-jee-sample.git"
},
"from": {
"kind": "DockerImage",
@@ -439,7 +455,7 @@
"tags": "builder,wildfly,java",
"supports":"wildfly:9.0,jee,java",
"version": "9.0",
- "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ "sampleRepo": "https://github.com/openshift/openshift-jee-sample.git"
},
"from": {
"kind": "DockerImage",
@@ -455,7 +471,7 @@
"tags": "builder,wildfly,java",
"supports":"wildfly:10.0,jee,java",
"version": "10.0",
- "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ "sampleRepo": "https://github.com/openshift/openshift-jee-sample.git"
},
"from": {
"kind": "DockerImage",
@@ -471,7 +487,7 @@
"tags": "builder,wildfly,java",
"supports":"wildfly:10.1,jee,java",
"version": "10.1",
- "sampleRepo": "https://github.com/bparees/openshift-jee-sample.git"
+ "sampleRepo": "https://github.com/openshift/openshift-jee-sample.git"
},
"from": {
"kind": "DockerImage",
diff --git a/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-rhel7.json
index 2ed0efe1e..b65f0a5e3 100644
--- a/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-rhel7.json
+++ b/roles/openshift_examples/files/examples/v3.6/image-streams/image-streams-rhel7.json
@@ -103,7 +103,7 @@
},
"from": {
"kind": "ImageStreamTag",
- "name": "4"
+ "name": "6"
}
},
{
@@ -137,6 +137,22 @@
"kind": "DockerImage",
"name": "registry.access.redhat.com/rhscl/nodejs-4-rhel7:latest"
}
+ },
+ {
+ "name": "6",
+ "annotations": {
+ "openshift.io/display-name": "Node.js 6",
+ "description": "Build and run Node.js 6 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container.",
+ "iconClass": "icon-nodejs",
+ "tags": "builder,nodejs",
+ "supports":"nodejs:6,nodejs",
+ "version": "6",
+ "sampleRepo": "https://github.com/openshift/nodejs-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/nodejs-6-rhel7:latest"
+ }
}
]
}
@@ -253,7 +269,7 @@
"tags": "hidden,builder,php",
"supports":"php:5.5,php",
"version": "5.5",
- "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
+ "sampleRepo": "https://github.com/openshift/cakephp-ex.git"
},
"from": {
"kind": "DockerImage",
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/amp.yml b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/amp.yml
new file mode 100644
index 000000000..4e469f6e8
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/amp.yml
@@ -0,0 +1,1261 @@
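+# The base_env mapping below is a YAML anchor (&base_env): each
+# DeploymentConfig in this template pulls in the same environment block via
+# the *base_env alias, so the shared settings are declared only once.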
+base_env: &base_env
+- name: RAILS_ENV
+ value: "production"
+- name: DATABASE_URL
+ value: "mysql2://root:${MYSQL_ROOT_PASSWORD}@system-mysql/${MYSQL_DATABASE}"
+- name: FORCE_SSL
+ value: "true"
+- name: THREESCALE_SUPERDOMAIN
+ value: "${WILDCARD_DOMAIN}"
+- name: TENANT_NAME
+ value: "${TENANT_NAME}"
+- name: APICAST_ACCESS_TOKEN
+ value: "${APICAST_ACCESS_TOKEN}"
+- name: ADMIN_ACCESS_TOKEN
+ value: "${ADMIN_ACCESS_TOKEN}"
+- name: PROVIDER_PLAN
+ value: 'enterprise'
+- name: USER_LOGIN
+ value: "${ADMIN_USERNAME}"
+- name: USER_PASSWORD
+ value: "${ADMIN_PASSWORD}"
+- name: RAILS_LOG_TO_STDOUT
+ value: "true"
+- name: RAILS_LOG_LEVEL
+ value: "info"
+- name: THINKING_SPHINX_ADDRESS
+ value: "system-sphinx"
+- name: THINKING_SPHINX_PORT
+ value: "9306"
+- name: THINKING_SPHINX_CONFIGURATION_FILE
+ value: "/tmp/sphinx.conf"
+- name: EVENTS_SHARED_SECRET
+ value: "${SYSTEM_BACKEND_SHARED_SECRET}"
+- name: THREESCALE_SANDBOX_PROXY_OPENSSL_VERIFY_MODE
+ value: "VERIFY_NONE"
+- name: APICAST_BACKEND_ROOT_ENDPOINT
+ value: "https://backend-${TENANT_NAME}.${WILDCARD_DOMAIN}"
+- name: CONFIG_INTERNAL_API_USER
+ value: "${SYSTEM_BACKEND_USERNAME}"
+- name: CONFIG_INTERNAL_API_PASSWORD
+ value: "${SYSTEM_BACKEND_PASSWORD}"
+- name: SECRET_KEY_BASE
+ value: "${SYSTEM_APP_SECRET_KEY_BASE}"
+- name: AMP_RELEASE
+ value: "${AMP_RELEASE}"
+- name: SMTP_ADDRESS
+ valueFrom:
+ configMapKeyRef:
+ name: smtp
+ key: address
+- name: SMTP_USER_NAME
+ valueFrom:
+ configMapKeyRef:
+ name: smtp
+ key: username
+- name: SMTP_PASSWORD
+ valueFrom:
+ configMapKeyRef:
+ name: smtp
+ key: password
+- name: SMTP_DOMAIN
+ valueFrom:
+ configMapKeyRef:
+ name: smtp
+ key: domain
+- name: SMTP_PORT
+ valueFrom:
+ configMapKeyRef:
+ name: smtp
+ key: port
+- name: SMTP_AUTHENTICATION
+ valueFrom:
+ configMapKeyRef:
+ name: smtp
+ key: authentication
+- name: SMTP_OPENSSL_VERIFY_MODE
+ valueFrom:
+ configMapKeyRef:
+ name: smtp
+ key: openssl.verify.mode
+- name: BACKEND_ROUTE
+ value: "https://backend-${TENANT_NAME}.${WILDCARD_DOMAIN}"
+
+apiVersion: v1
+kind: Template
+metadata:
+ name: "system"
+message: "Login on https://${TENANT_NAME}-admin.${WILDCARD_DOMAIN} as ${ADMIN_USERNAME}/${ADMIN_PASSWORD}"
+objects:
+
+- apiVersion: "v1"
+ kind: "PersistentVolumeClaim"
+ metadata:
+ name: "system-storage"
+ spec:
+ accessModes:
+ - "ReadWriteMany"
+ resources:
+ requests:
+ storage: "100Mi"
+
+- apiVersion: "v1"
+ kind: "PersistentVolumeClaim"
+ metadata:
+ name: "mysql-storage"
+ spec:
+ accessModes:
+ - "ReadWriteOnce"
+ resources:
+ requests:
+ storage: "1Gi"
+
+- apiVersion: "v1"
+ kind: "PersistentVolumeClaim"
+ metadata:
+ name: "system-redis-storage"
+ spec:
+ accessModes:
+ - "ReadWriteOnce"
+ resources:
+ requests:
+ storage: "1Gi"
+
+- apiVersion: "v1"
+ kind: "PersistentVolumeClaim"
+ metadata:
+ name: "backend-redis-storage"
+ spec:
+ accessModes:
+ - "ReadWriteOnce"
+ resources:
+ requests:
+ storage: "1Gi"
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: backend-cron
+ spec:
+ replicas: 1
+ selector:
+ name: backend-cron
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: backend-cron
+ spec:
+ containers:
+ - args:
+ - backend-cron
+ env:
+ - name: CONFIG_REDIS_PROXY
+ value: "backend-redis:6379"
+ - name: CONFIG_QUEUES_MASTER_NAME
+ value: "backend-redis:6379/1"
+ - name: RACK_ENV
+ value: "production"
+ image: 3scale-amp20/backend:1.0-2
+ imagePullPolicy: IfNotPresent
+ name: backend-cron
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: backend-redis
+ spec:
+ replicas: 1
+ selector:
+ name: backend-redis
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ labels:
+ name: backend-redis
+ spec:
+ containers:
+ - image: ${REDIS_IMAGE}
+ imagePullPolicy: IfNotPresent
+ name: backend-redis
+ readinessProbe:
+ exec:
+ command:
+ - "container-entrypoint"
+ - "bash"
+ - "-c"
+ - "redis-cli set liveness-probe \"`date`\" | grep OK"
+ initialDelaySeconds: 10
+ periodSeconds: 30
+ timeoutSeconds: 1
+ livenessProbe:
+ tcpSocket:
+ port: 6379
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ volumeMounts:
+ - name: backend-redis-storage
+ mountPath: "/var/lib/redis/data"
+ - name: redis-config
+ mountPath: /etc/redis.conf
+ subPath: redis.conf
+ volumes:
+ - name: backend-redis-storage
+ persistentVolumeClaim:
+ claimName: backend-redis-storage
+ - name: redis-config
+ configMap:
+ name: redis-config
+ items:
+ - key: redis.conf
+ path: redis.conf
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: backend-listener
+ spec:
+ replicas: 1
+ selector:
+ name: backend-listener
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: backend-listener
+ spec:
+ containers:
+ - args:
+ - 3scale_backend
+ - start
+ - "-e"
+ - production
+ - "-p"
+ - '3000'
+ - "-x"
+ - "/dev/stdout"
+ env:
+ - name: CONFIG_REDIS_PROXY
+ value: "backend-redis:6379"
+ - name: CONFIG_QUEUES_MASTER_NAME
+ value: "backend-redis:6379/1"
+ - name: RACK_ENV
+ value: "production"
+ - name: CONFIG_INTERNAL_API_USER
+ value: "${SYSTEM_BACKEND_USERNAME}"
+ - name: CONFIG_INTERNAL_API_PASSWORD
+ value: "${SYSTEM_BACKEND_PASSWORD}"
+ image: 3scale-amp20/backend:1.0-2
+ imagePullPolicy: IfNotPresent
+ name: backend-listener
+ livenessProbe:
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ tcpSocket:
+ port: 3000
+ readinessProbe:
+ httpGet:
+ path: "/status"
+ port: 3000
+ initialDelaySeconds: 30
+ timeoutSeconds: 5
+ ports:
+ - containerPort: 3000
+ protocol: TCP
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: backend-redis
+ spec:
+ ports:
+ - port: 6379
+ protocol: TCP
+ targetPort: 6379
+ selector:
+ name: backend-redis
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: backend-listener
+ spec:
+ ports:
+ - port: 3000
+ protocol: TCP
+ targetPort: 3000
+ name: http
+ selector:
+ name: backend-listener
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: system-provider
+ spec:
+ ports:
+ - port: 3000
+ protocol: TCP
+ targetPort: provider
+ name: http
+ selector:
+ name: system-app
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: system-developer
+ spec:
+ ports:
+ - port: 3000
+ protocol: TCP
+ targetPort: developer
+ name: http
+ selector:
+ name: system-app
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: backend-worker
+ spec:
+ replicas: 1
+ selector:
+ name: backend-worker
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: backend-worker
+ spec:
+ containers:
+ - args:
+ - 3scale_backend_worker
+ - run
+ env:
+ - name: CONFIG_REDIS_PROXY
+ value: "backend-redis:6379"
+ - name: CONFIG_QUEUES_MASTER_NAME
+ value: "backend-redis:6379/1"
+ - name: RACK_ENV
+ value: "production"
+ - name: CONFIG_EVENTS_HOOK
+ value: http://system-provider:3000/master/events/import
+ - name: CONFIG_EVENTS_HOOK_SHARED_SECRET
+ value: ${SYSTEM_BACKEND_SHARED_SECRET}
+ image: 3scale-amp20/backend:1.0-2
+ imagePullPolicy: IfNotPresent
+ name: backend-worker
+ triggers:
+ - type: ConfigChange
+
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: 'system-mysql'
+ spec:
+ ports:
+ - name: system-mysql
+ protocol: TCP
+ port: 3306
+ targetPort: 3306
+ nodePort: 0
+ selector:
+ name: 'system-mysql'
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: system-redis
+ spec:
+ ports:
+ - port: 6379
+ protocol: TCP
+ targetPort: 6379
+ name: redis
+ selector:
+ name: system-redis
+
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: system-redis
+ spec:
+ replicas: 1
+ selector:
+ name: system-redis
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ labels:
+ name: system-redis
+ spec:
+ containers:
+ - args:
+ image: ${REDIS_IMAGE}
+ imagePullPolicy: IfNotPresent
+ name: system-redis
+ terminationMessagePath: /dev/termination-log
+ volumeMounts:
+ - name: system-redis-storage
+ mountPath: "/var/lib/redis/data"
+ - name: redis-config
+ mountPath: /etc/redis.conf
+ subPath: redis.conf
+ readinessProbe:
+ exec:
+ command:
+ - "container-entrypoint"
+ - "bash"
+ - "-c"
+ - "redis-cli set liveness-probe \"`date`\" | grep OK"
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ timeoutSeconds: 5
+ livenessProbe:
+ tcpSocket:
+ port: 6379
+ initialDelaySeconds: 10
+ periodSeconds: 5
+ volumes:
+ - name: system-redis-storage
+ persistentVolumeClaim:
+ claimName: system-redis-storage
+ - name: redis-config
+ configMap:
+ name: redis-config
+ items:
+ - key: redis.conf
+ path: redis.conf
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: system-sphinx
+ spec:
+ ports:
+ - port: 9306
+ protocol: TCP
+ targetPort: 9306
+ name: sphinx
+ selector:
+ name: system-sphinx
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: system-sphinx
+ spec:
+ replicas: 1
+ selector:
+ name: system-sphinx
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: system-sphinx
+ spec:
+ volumes:
+ - name: system-sphinx-database
+ emptyDir: {}
+ containers:
+ - args:
+ - rake
+ - 'openshift:thinking_sphinx:start'
+ volumeMounts:
+ - name: system-sphinx-database
+ mountPath: "/opt/system/db/sphinx"
+ env:
+ - name: RAILS_ENV
+ value: production
+ - name: DATABASE_URL
+ value: "mysql2://root:${MYSQL_ROOT_PASSWORD}@system-mysql/${MYSQL_DATABASE}"
+ - name: THINKING_SPHINX_ADDRESS
+ value: 0.0.0.0
+ - name: THINKING_SPHINX_CONFIGURATION_FILE
+ value: "db/sphinx/production.conf"
+ - name: THINKING_SPHINX_PID_FILE
+ value: db/sphinx/searchd.pid
+ - name: DELTA_INDEX_INTERVAL
+ value: '5'
+ - name: FULL_REINDEX_INTERVAL
+ value: '60'
+ image: 3scale-amp20/system:1.0-2
+ imagePullPolicy: IfNotPresent
+ name: system-sphinx
+ livenessProbe:
+ tcpSocket:
+ port: 9306
+ initialDelaySeconds: 60
+ periodSeconds: 10
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: system-memcache
+ spec:
+ ports:
+ - port: 11211
+ protocol: TCP
+ targetPort: 11211
+ name: memcache
+ selector:
+ name: system-memcache
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: system-memcache
+ spec:
+ replicas: 1
+ selector:
+ name: system-memcache
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: system-memcache
+ spec:
+ containers:
+ - args:
+ env:
+ image: 3scale-amp20/memcached:1.4.15-7
+ imagePullPolicy: IfNotPresent
+ name: memcache
+ readinessProbe:
+ exec:
+ command:
+ - "sh"
+ - "-c"
+ - "echo version | nc $HOSTNAME 11211 | grep VERSION"
+ initialDelaySeconds: 10
+ periodSeconds: 30
+ timeoutSeconds: 5
+ livenessProbe:
+ tcpSocket:
+ port: 11211
+ initialDelaySeconds: 10
+ periodSeconds: 10
+ command:
+ - "memcached"
+ - "-m"
+ - "64"
+ ports:
+ - containerPort: 11211
+ protocol: TCP
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: system-provider-admin-route
+ labels:
+ app: system-route
+ spec:
+ host: ${TENANT_NAME}-admin.${WILDCARD_DOMAIN}
+ to:
+ kind: Service
+ name: system-provider
+ port:
+ targetPort: http
+ tls:
+ termination: edge
+ insecureEdgeTerminationPolicy: Allow
+
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: backend-route
+ labels:
+ app: system-route
+ spec:
+ host: backend-${TENANT_NAME}.${WILDCARD_DOMAIN}
+ to:
+ kind: Service
+ name: backend-listener
+ port:
+ targetPort: http
+ tls:
+ termination: edge
+ insecureEdgeTerminationPolicy: Allow
+
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: system-developer-route
+ labels:
+ app: system-route
+ spec:
+ host: ${TENANT_NAME}.${WILDCARD_DOMAIN}
+ to:
+ kind: Service
+ name: system-developer
+ port:
+ targetPort: http
+ tls:
+ termination: edge
+ insecureEdgeTerminationPolicy: Allow
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: apicast-staging
+ spec:
+ replicas: 1
+ selector:
+ deploymentconfig: apicast-staging
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 1800
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ deploymentconfig: apicast-staging
+ spec:
+ containers:
+ - env:
+ - name: THREESCALE_PORTAL_ENDPOINT
+ value: http://${APICAST_ACCESS_TOKEN}@system-provider:3000
+ - name: APICAST_CONFIGURATION_LOADER
+ value: "lazy"
+ - name: APICAST_CONFIGURATION_CACHE
+ value: "0"
+ - name: THREESCALE_DEPLOYMENT_ENV
+ value: "sandbox"
+ - name: APICAST_MANAGEMENT_API
+ value: "${APICAST_MANAGEMENT_API}"
+ - name: BACKEND_ENDPOINT_OVERRIDE
+ value: http://backend-listener:3000
+ - name: OPENSSL_VERIFY
+ value: '${APICAST_OPENSSL_VERIFY}'
+ - name: APICAST_RESPONSE_CODES
+ value: '${APICAST_RESPONSE_CODES}'
+ - name: REDIS_URL
+ value: "redis://system-redis:6379/2"
+ image: 3scale-amp20/apicast-gateway:1.0-3
+ imagePullPolicy: IfNotPresent
+ name: apicast-staging
+ livenessProbe:
+ httpGet:
+ path: /status/live
+ port: 8090
+ initialDelaySeconds: 10
+ timeoutSeconds: 5
+ periodSeconds: 10
+ readinessProbe:
+ httpGet:
+ path: /status/ready
+ port: 8090
+ initialDelaySeconds: 15
+ timeoutSeconds: 5
+ periodSeconds: 30
+ ports:
+ - containerPort: 8080
+ protocol: TCP
+ - containerPort: 8090
+ protocol: TCP
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: apicast-staging
+ spec:
+ ports:
+ - name: gateway
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: management
+ port: 8090
+ protocol: TCP
+ targetPort: 8090
+ selector:
+ deploymentconfig: apicast-staging
+
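+# The production gateway below differs from the staging one mainly in how it
+# handles configuration: it loads the configuration once at boot
+# (APICAST_CONFIGURATION_LOADER=boot) and caches it for 300 seconds, whereas
+# staging reloads lazily with no cache so portal changes take effect
+# immediately. The two gateways also use separate Redis databases (/1 vs /2).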
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: apicast-production
+ spec:
+ replicas: 1
+ selector:
+ deploymentconfig: apicast-production
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 1800
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ deploymentconfig: apicast-production
+ spec:
+ containers:
+ - env:
+ - name: THREESCALE_PORTAL_ENDPOINT
+ value: "http://${APICAST_ACCESS_TOKEN}@system-provider:3000"
+ - name: APICAST_CONFIGURATION_LOADER
+ value: "boot"
+ - name: APICAST_CONFIGURATION_CACHE
+ value: "300"
+ - name: THREESCALE_DEPLOYMENT_ENV
+ value: "production"
+ - name: APICAST_MANAGEMENT_API
+ value: "${APICAST_MANAGEMENT_API}"
+ - name: BACKEND_ENDPOINT_OVERRIDE
+ value: http://backend-listener:3000
+ - name: OPENSSL_VERIFY
+ value: '${APICAST_OPENSSL_VERIFY}'
+ - name: APICAST_RESPONSE_CODES
+ value: '${APICAST_RESPONSE_CODES}'
+ - name: REDIS_URL
+ value: "redis://system-redis:6379/1"
+ image: 3scale-amp20/apicast-gateway:1.0-3
+ imagePullPolicy: IfNotPresent
+ name: apicast-production
+ livenessProbe:
+ httpGet:
+ path: /status/live
+ port: 8090
+ initialDelaySeconds: 10
+ timeoutSeconds: 5
+ periodSeconds: 10
+ readinessProbe:
+ httpGet:
+ path: /status/ready
+ port: 8090
+ initialDelaySeconds: 15
+ timeoutSeconds: 5
+ periodSeconds: 30
+ ports:
+ - containerPort: 8080
+ protocol: TCP
+ - containerPort: 8090
+ protocol: TCP
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: apicast-production
+ spec:
+ ports:
+ - name: gateway
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: management
+ port: 8090
+ protocol: TCP
+ targetPort: 8090
+ selector:
+ deploymentconfig: apicast-production
+
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: api-apicast-staging-route
+ labels:
+ app: apicast-staging
+ spec:
+ host: api-${TENANT_NAME}-apicast-staging.${WILDCARD_DOMAIN}
+ to:
+ kind: Service
+ name: apicast-staging
+ port:
+ targetPort: gateway
+ tls:
+ termination: edge
+ insecureEdgeTerminationPolicy: Allow
+
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: api-apicast-production-route
+ labels:
+ app: apicast-production
+ spec:
+ host: api-${TENANT_NAME}-apicast-production.${WILDCARD_DOMAIN}
+ to:
+ kind: Service
+ name: apicast-production
+ port:
+ targetPort: gateway
+ tls:
+ termination: edge
+ insecureEdgeTerminationPolicy: Allow
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: system-app
+ spec:
+ replicas: 1
+ selector:
+ name: system-app
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ pre:
+ failurePolicy: Retry
+ execNewPod:
+ containerName: system-provider
+ command:
+ - bash
+ - -c
+ - bundle exec rake boot openshift:deploy
+ env: *base_env
+ volumes:
+ - system-storage
+ post:
+ failurePolicy: Abort
+ execNewPod:
+ containerName: system-provider
+ command:
+ - bash
+ - -c
+ - bundle exec rake boot openshift:post_deploy
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: system-app
+ spec:
+ containers:
+ - args:
+ env: *base_env
+ image: 3scale-amp20/system:1.0-2
+ imagePullPolicy: IfNotPresent
+ command: ['env', 'TENANT_MODE=provider', 'PORT=3000', 'container-entrypoint', 'bundle', 'exec', 'unicorn', '-c', 'config/unicorn.rb']
+ name: system-provider
+ livenessProbe:
+ timeoutSeconds: 10
+ initialDelaySeconds: 20
+ tcpSocket:
+ port: provider
+ periodSeconds: 10
+ readinessProbe:
+ httpGet:
+ path: /check.txt
+ port: provider
+ scheme: HTTP
+ httpHeaders:
+ - name: X-Forwarded-Proto
+ value: https
+ initialDelaySeconds: 30
+ timeoutSeconds: 10
+ periodSeconds: 30
+ ports:
+ - containerPort: 3000
+ protocol: TCP
+ name: provider
+ volumeMounts:
+ - name: system-storage
+ mountPath: /opt/system/public/system
+ - args:
+ env: *base_env
+ image: 3scale-amp20/system:1.0-2
+ command: ['env', 'TENANT_MODE=developer', 'PORT=3001', 'container-entrypoint', 'bundle', 'exec', 'unicorn', '-c', 'config/unicorn.rb']
+ imagePullPolicy: IfNotPresent
+ name: system-developer
+ livenessProbe:
+ timeoutSeconds: 10
+ initialDelaySeconds: 20
+ tcpSocket:
+ port: developer
+ periodSeconds: 10
+ readinessProbe:
+ httpGet:
+ path: /check.txt
+ port: developer
+ scheme: HTTP
+ httpHeaders:
+ - name: X-Forwarded-Proto
+ value: https
+ initialDelaySeconds: 30
+ timeoutSeconds: 10
+ periodSeconds: 30
+ ports:
+ - containerPort: 3001
+ protocol: TCP
+ name: developer
+ volumeMounts:
+ - name: system-storage
+ mountPath: /opt/system/public/system
+ readOnly: true
+ volumes:
+ - name: system-storage
+ persistentVolumeClaim:
+ claimName: system-storage
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: system-resque
+ spec:
+ replicas: 1
+ selector:
+ name: system-resque
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: system-resque
+ spec:
+ containers:
+ - args:
+ - 'rake'
+ - 'resque:work'
+ - 'QUEUE=*'
+ env: *base_env
+ image: 3scale-amp20/system:1.0-2
+ imagePullPolicy: IfNotPresent
+ name: system-resque
+ volumeMounts:
+ - name: system-storage
+ mountPath: /opt/system/public/system
+ - args:
+ - 'rake'
+ - 'resque:scheduler'
+ - 'QUEUE=*'
+ env: *base_env
+ image: 3scale-amp20/system:1.0-2
+ imagePullPolicy: IfNotPresent
+ name: system-scheduler
+ volumes:
+ - name: system-storage
+ persistentVolumeClaim:
+ claimName: system-storage
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: system-sidekiq
+ spec:
+ replicas: 1
+ selector:
+ name: system-sidekiq
+ strategy:
+ rollingParams:
+ intervalSeconds: 1
+ maxSurge: 25%
+ maxUnavailable: 25%
+ timeoutSeconds: 600
+ updatePeriodSeconds: 1
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ name: system-sidekiq
+ spec:
+ containers:
+ - args:
+ - rake
+ - sidekiq:worker
+ env: *base_env
+ image: 3scale-amp20/system:1.0-2
+ imagePullPolicy: IfNotPresent
+ name: system-sidekiq
+ volumeMounts:
+ - name: system-storage
+ mountPath: /opt/system/public/system
+ volumes:
+ - name: system-storage
+ persistentVolumeClaim:
+ claimName: system-storage
+ triggers:
+ - type: ConfigChange
+
+
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: 'system-mysql'
+ spec:
+ strategy:
+ type: Recreate
+ triggers:
+ - type: ConfigChange
+ replicas: 1
+ selector:
+ name: 'system-mysql'
+ template:
+ metadata:
+ labels:
+ name: 'system-mysql'
+ spec:
+ containers:
+ - name: system-mysql
+ image: ${MYSQL_IMAGE}
+ ports:
+ - containerPort: 3306
+ protocol: TCP
+ resources:
+ limits:
+ memory: 2Gi
+ requests:
+ cpu: '1'
+ memory: 1Gi
+ readinessProbe:
+ timeoutSeconds: 5
+ initialDelaySeconds: 10
+ periodSeconds: 30
+ exec:
+ command:
+ - /bin/sh
+ - '-i'
+ - '-c'
+ - MYSQL_PWD="$MYSQL_PASSWORD" mysql -h 127.0.0.1 -u $MYSQL_USER -D $MYSQL_DATABASE -e 'SELECT 1'
+ livenessProbe:
+ initialDelaySeconds: 30
+ periodSeconds: 10
+ tcpSocket:
+ port: 3306
+ env:
+ - name: MYSQL_USER
+ value: ${MYSQL_USER}
+ - name: MYSQL_PASSWORD
+ value: ${MYSQL_PASSWORD}
+ - name: MYSQL_DATABASE
+ value: ${MYSQL_DATABASE}
+ - name: MYSQL_ROOT_PASSWORD
+ value: ${MYSQL_ROOT_PASSWORD}
+ - name: MYSQL_LOWER_CASE_TABLE_NAMES
+ value: "1"
+ volumeMounts:
+ - name: 'mysql-storage'
+ mountPath: /var/lib/mysql/data
+ imagePullPolicy: IfNotPresent
+ volumes:
+ - name: 'mysql-storage'
+ persistentVolumeClaim:
+ claimName: 'mysql-storage'
+- kind: ConfigMap
+ apiVersion: v1
+ metadata:
+ name: redis-config
+ data:
+ redis.conf: |
+ protected-mode no
+
+ port 6379
+
+ timeout 0
+ tcp-keepalive 300
+
+ daemonize no
+ supervised no
+
+ loglevel notice
+
+ databases 16
+
+ save 900 1
+ save 300 10
+ save 60 10000
+
+ stop-writes-on-bgsave-error yes
+
+ rdbcompression yes
+ rdbchecksum yes
+
+ dbfilename dump.rdb
+
+ slave-serve-stale-data yes
+ slave-read-only yes
+
+ repl-diskless-sync no
+ repl-disable-tcp-nodelay no
+
+ appendonly yes
+ appendfilename "appendonly.aof"
+ appendfsync everysec
+ no-appendfsync-on-rewrite no
+ auto-aof-rewrite-percentage 100
+ auto-aof-rewrite-min-size 64mb
+ aof-load-truncated yes
+
+ lua-time-limit 5000
+
+ activerehashing no
+
+ aof-rewrite-incremental-fsync yes
+ dir /var/lib/redis/data
+
+- kind: ConfigMap
+ apiVersion: v1
+ metadata:
+ name: smtp
+ data:
+ address: ""
+ username: ""
+ password: ""
+ domain: ""
+ port: ""
+ authentication: ""
+ openssl.verify.mode: ""
+
+parameters:
+- name: AMP_RELEASE
+ description: "AMP release tag."
+ value: 2.0.0-CR2-redhat-1
+ required: true
+- name: ADMIN_PASSWORD
+ required: true
+ generate: expression
+ from: "[a-z0-9]{8}"
+- name: ADMIN_USERNAME
+ value: admin
+ required: true
+- name: APICAST_ACCESS_TOKEN
+ required: true
+ generate: expression
+ from: "[a-z0-9]{8}"
+ description: "Read Only Access Token that is APIcast going to use to download its configuration."
+- name: ADMIN_ACCESS_TOKEN
+ required: false
+ generate: expression
+ from: "[a-z0-9]{16}"
+ description: "Admin Access Token with all scopes and write permissions for API access."
+- name: WILDCARD_DOMAIN
+ description: Root domain for the wildcard routes. E.g. example.com will generate 3scale-admin.example.com.
+ required: true
+- name: TENANT_NAME
+ description: "Tenant name under the root that Admin UI will be available with -admin suffix."
+ required: true
+ value: "3scale"
+- name: MYSQL_USER
+ displayName: MySQL User
+ description: Username for the MySQL user that will be used to access the database.
+ value: "mysql"
+ required: true
+- name: MYSQL_PASSWORD
+ displayName: MySQL Password
+ description: Password for the MySQL user.
+ generate: expression
+ from: "[a-z0-9]{8}"
+ required: true
+- name: MYSQL_DATABASE
+ displayName: MySQL Database Name
+ description: Name of the MySQL database accessed.
+ value: "system"
+ required: true
+- name: MYSQL_ROOT_PASSWORD
+ displayName: MySQL Root Password
+ description: Password for the MySQL root user.
+ generate: expression
+ from: "[a-z0-9]{8}"
+ required: true
+- name: SYSTEM_BACKEND_USERNAME
+ description: Internal 3scale API username for internal 3scale API auth.
+ value: "3scale_api_user"
+ required: true
+- name: SYSTEM_BACKEND_PASSWORD
+ description: Internal 3scale API password for internal 3scale API auth.
+ generate: expression
+ from: "[a-z0-9]{8}"
+ required: true
+- name: REDIS_IMAGE
+ description: Redis image to use
+ required: true
+ value: rhscl/redis-32-rhel7:3.2-5.7
+- name: MYSQL_IMAGE
+ description: MySQL image to use
+ required: true
+ value: rhscl/mysql-56-rhel7:5.6-13.14
+- name: SYSTEM_BACKEND_SHARED_SECRET
+ description: Shared secret to import events from backend to system.
+ generate: expression
+ from: "[a-z0-9]{8}"
+ required: true
+- name: SYSTEM_APP_SECRET_KEY_BASE
+ description: System application secret key base
+ generate: expression
+ from: "[a-f0-9]{128}"
+ required: true
+- name: APICAST_MANAGEMENT_API
+ description: "Scope of the APIcast Management API. Can be disabled, status or debug. At least status required for health checks."
+ required: false
+ value: "status"
+- name: APICAST_OPENSSL_VERIFY
+ description: "Turn on/off the OpenSSL peer verification when downloading the configuration. Can be set to true/false."
+ required: false
+ value: "false"
+- name: APICAST_RESPONSE_CODES
+ description: "Enable logging response codes in APIcast."
+ value: "true"
+ required: false
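+
+# A minimal usage sketch (assumed workflow): WILDCARD_DOMAIN is the only
+# parameter with neither a default value nor a generator, so a deployment can
+# be started with something like
+#   oc process -f amp.yml -p WILDCARD_DOMAIN=example.com | oc create -f -
+# where example.com is an illustrative placeholder.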
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/apicast-gateway-template.yml b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/apicast-gateway-template.yml
index 34f5fcbcc..e69de29bb 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/apicast-gateway-template.yml
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/apicast-gateway-template.yml
@@ -1,149 +0,0 @@
-apiVersion: v1
-kind: Template
-metadata:
- creationTimestamp: null
- name: 3scale-gateway
- annotations:
- description: "3scale API Gateway"
- iconClass: "icon-load-balancer"
- tags: "api,gateway,3scale"
-objects:
-- apiVersion: v1
- kind: DeploymentConfig
- metadata:
- name: ${THREESCALE_GATEWAY_NAME}
- spec:
- replicas: 2
- selector:
- deploymentconfig: ${THREESCALE_GATEWAY_NAME}
- strategy:
- resources: {}
- rollingParams:
- intervalSeconds: 1
- maxSurge: 25%
- maxUnavailable: 25%
- timeoutSeconds: 600
- updatePeriodSeconds: 1
- type: Rolling
- template:
- metadata:
- labels:
- deploymentconfig: ${THREESCALE_GATEWAY_NAME}
- spec:
- containers:
- - env:
- - name: THREESCALE_PORTAL_ENDPOINT
- valueFrom:
- secretKeyRef:
- name: ${THREESCALE_PORTAL_ENDPOINT_SECRET}
- key: password
- - name: THREESCALE_CONFIG_FILE
- value: ${THREESCALE_CONFIG_FILE}
- - name: RESOLVER
- value: ${RESOLVER}
- - name: APICAST_SERVICES
- value: ${APICAST_SERVICES}
- - name: APICAST_MISSING_CONFIGURATION
- value: ${MISSING_CONFIGURATION}
- - name: APICAST_LOG_LEVEL
- value: ${APICAST_LOG_LEVEL}
- - name: APICAST_PATH_ROUTING_ENABLED
- value: ${PATH_ROUTING}
- - name: APICAST_RESPONSE_CODES
- value: ${RESPONSE_CODES}
- - name: APICAST_REQUEST_LOGS
- value: ${REQUEST_LOGS}
- - name: APICAST_RELOAD_CONFIG
- value: ${APICAST_RELOAD_CONFIG}
- image: ${THREESCALE_GATEWAY_IMAGE}
- imagePullPolicy: Always
- name: ${THREESCALE_GATEWAY_NAME}
- livenessProbe:
- httpGet:
- path: /status/live
- port: 8090
- initialDelaySeconds: 10
- timeoutSeconds: 1
- readinessProbe:
- httpGet:
- path: /status/ready
- port: 8090
- initialDelaySeconds: 15
- timeoutSeconds: 1
- ports:
- - containerPort: 8080
- protocol: TCP
- resources: {}
- terminationMessagePath: /dev/termination-log
- dnsPolicy: ClusterFirst
- restartPolicy: Always
- securityContext: {}
- terminationGracePeriodSeconds: 30
- triggers:
- - type: ConfigChange
- status: {}
-- apiVersion: v1
- kind: Service
- metadata:
- creationTimestamp: null
- name: ${THREESCALE_GATEWAY_NAME}
- spec:
- ports:
- - name: 8080-tcp
- port: 8080
- protocol: TCP
- targetPort: 8080
- selector:
- deploymentconfig: ${THREESCALE_GATEWAY_NAME}
- sessionAffinity: None
- type: ClusterIP
- status:
- loadBalancer: {}
-parameters:
-- description: "Name of the secret containing the THREESCALE_PORTAL_ENDPOINT with the access-token or provider key"
- value: threescale-portal-endpoint-secret
- name: THREESCALE_PORTAL_ENDPOINT_SECRET
- required: true
-- description: "Path to saved JSON file with configuration for the gateway. Has to be injected to the docker image as read only volume."
- value:
- name: THREESCALE_CONFIG_FILE
- required: false
-- description: "Name for the 3scale API Gateway"
- value: threescalegw
- name: THREESCALE_GATEWAY_NAME
- required: true
-- description: "Docker image to use."
- value: 'rhamp10/apicast-gateway:1.0.0-4'
- name: THREESCALE_GATEWAY_IMAGE
- required: true
-- description: "DNS Resolver for openresty, if empty it will be autodiscovered"
- value:
- name: RESOLVER
- required: false
-- description: "Subset of services to run. Use comma separated list of service ids (eg. 42,1337)"
- value:
- name: APICAST_SERVICES
- required: false
-- description: "What to do on missing or invalid configuration. Allowed values are: log, exit."
- value: exit
- required: false
- name: MISSING_CONFIGURATION
-- description: "Log level. One of the following: debug, info, notice, warn, error, crit, alert, or emerg."
- name: APICAST_LOG_LEVEL
- required: false
-- description: "Enable path routing. Experimental feature."
- name: PATH_ROUTING
- required: false
- value: "false"
-- description: "Enable traffic logging to 3scale. Includes whole request and response."
- value: "false"
- name: REQUEST_LOGS
- required: false
-- description: "Enable logging response codes to 3scale."
- value: "false"
- name: RESPONSE_CODES
- required: false
-- description: "Reload config on every request"
- value: "false"
- name: APICAST_RELOAD_CONFIG
- required: false
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/apicast.yml b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/apicast.yml
new file mode 100644
index 000000000..8e8051c0b
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/apicast.yml
@@ -0,0 +1,157 @@
+apiVersion: v1
+kind: Template
+metadata:
+ name: 3scale-gateway
+ annotations:
+ description: "3scale API Gateway"
+ iconClass: "icon-load-balancer"
+ tags: "api,gateway,3scale"
+objects:
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${APICAST_NAME}"
+ spec:
+ replicas: 2
+ selector:
+ deploymentconfig: "${APICAST_NAME}"
+ strategy:
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ deploymentconfig: "${APICAST_NAME}"
+ spec:
+ containers:
+ - env:
+ - name: THREESCALE_PORTAL_ENDPOINT
+ valueFrom:
+ secretKeyRef:
+ name: "${CONFIGURATION_URL_SECRET}"
+ key: password
+ - name: THREESCALE_CONFIG_FILE
+ value: "${CONFIGURATION_FILE_PATH}"
+ - name: THREESCALE_DEPLOYMENT_ENV
+ value: "${DEPLOYMENT_ENVIRONMENT}"
+ - name: RESOLVER
+ value: "${RESOLVER}"
+ - name: APICAST_SERVICES
+ value: "${SERVICES_LIST}"
+ - name: APICAST_CONFIGURATION_LOADER
+ value: "${CONFIGURATION_LOADER}"
+ - name: APICAST_LOG_LEVEL
+ value: "${LOG_LEVEL}"
+ - name: APICAST_PATH_ROUTING_ENABLED
+ value: "${PATH_ROUTING}"
+ - name: APICAST_RESPONSE_CODES
+ value: "${RESPONSE_CODES}"
+ - name: APICAST_CONFIGURATION_CACHE
+ value: "${CONFIGURATION_CACHE}"
+ - name: REDIS_URL
+ value: "${REDIS_URL}"
+ - name: APICAST_MANAGEMENT_API
+ value: "${MANAGEMENT_API}"
+ - name: OPENSSL_VERIFY
+ value: "${OPENSSL_VERIFY}"
+ image: 3scale-amp20/apicast-gateway:1.0-3
+ imagePullPolicy: IfNotPresent
+ name: "${APICAST_NAME}"
+ livenessProbe:
+ httpGet:
+ path: /status/live
+ port: management
+ initialDelaySeconds: 10
+ timeoutSeconds: 1
+ readinessProbe:
+ httpGet:
+ path: /status/ready
+ port: management
+ initialDelaySeconds: 15
+ timeoutSeconds: 1
+ ports:
+ - name: proxy
+ containerPort: 8080
+ protocol: TCP
+ - name: management
+ containerPort: 8090
+ protocol: TCP
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${APICAST_NAME}"
+ spec:
+ ports:
+ - name: proxy
+ port: 8080
+ protocol: TCP
+ targetPort: 8080
+ - name: management
+ port: 8090
+ protocol: TCP
+ targetPort: 8090
+ selector:
+ deploymentconfig: "${APICAST_NAME}"
+
+parameters:
+- name: AMP_RELEASE
+ description: "AMP release tag."
+ value: 2.0.0-CR2-redhat-1
+ required: true
+- description: "Name of the secret containing the THREESCALE_PORTAL_ENDPOINT with the access-token or provider key"
+ value: apicast-configuration-url-secret
+ name: CONFIGURATION_URL_SECRET
+ required: true
+- description: "Path to saved JSON file with configuration for the gateway. Has to be injected to the docker image as read only volume."
+ value:
+ name: CONFIGURATION_FILE_PATH
+ required: false
+- description: "Deployment environment. Can be sandbox or production."
+ value: production
+ name: DEPLOYMENT_ENVIRONMENT
+ required: true
+- description: "Name for the 3scale API Gateway"
+ value: apicast
+ name: APICAST_NAME
+ required: true
+- description: "DNS Resolver for openresty, if empty it will be autodiscovered"
+ value:
+ name: RESOLVER
+ required: false
+- description: "Subset of services to run. Use comma separated list of service ids (eg. 42,1337)"
+ value:
+ name: SERVICES_LIST
+ required: false
+- name: CONFIGURATION_LOADER
+ description: "When to load configuration. If on gateway start or incoming request. Allowed values are: lazy, boot."
+ value: boot
+ required: false
+- description: "Log level. One of the following: debug, info, notice, warn, error, crit, alert, or emerg."
+ name: LOG_LEVEL
+ required: false
+- description: "Enable path routing. Experimental feature."
+ name: PATH_ROUTING
+ required: false
+ value: "false"
+- description: "Enable logging response codes to 3scale."
+ value: "false"
+ name: RESPONSE_CODES
+ required: false
+- name: CONFIGURATION_CACHE
+ description: "For how long to cache the downloaded configuration in seconds. Can be left empty, 0 or greater than 60."
+ value: ""
+ required: false
+- description: "Redis URL. Required for OAuth2 integration. ex: redis://PASSWORD@127.0.0.1:6379/0"
+ name: REDIS_URL
+ required: false
+- name: MANAGEMENT_API
+ description: "Scope of the Management API. Can be disabled, status or debug. At least status required for health checks."
+ required: false
+ value: "status"
+- name: OPENSSL_VERIFY
+ description: "Turn on/off the OpenSSL peer verification. Can be set to true/false."
+ required: true
+ value: "false"
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql-persistent.json
index 81ae63416..ec335daa0 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql-persistent.json
@@ -147,6 +147,9 @@
}
},
"spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
"triggers": [
{
"type": "ImageChange",
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql.json
index 7a285dba8..6304586dd 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/dancer-mysql.json
@@ -147,6 +147,9 @@
}
},
"spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
"triggers": [
{
"type": "ImageChange",
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql-persistent.json
index 9f982c286..152bf1c7c 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql-persistent.json
@@ -148,7 +148,7 @@
},
"spec": {
"strategy": {
- "type": "Rolling"
+ "type": "Recreate"
},
"triggers": [
{
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql.json
index 7bee85ddd..f3b5f97f3 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/django-postgresql.json
@@ -148,7 +148,7 @@
},
"spec": {
"strategy": {
- "type": "Rolling"
+ "type": "Recreate"
},
"triggers": [
{
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb-persistent.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb-persistent.json
index 6ee999cb1..c570ca5d5 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb-persistent.json
@@ -154,7 +154,7 @@
},
"spec": {
"strategy": {
- "type": "Rolling"
+ "type": "Recreate"
},
"triggers": [
{
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb.json
index 5c177a7e0..161f1582e 100644
--- a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb.json
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/nodejs-mongodb.json
@@ -154,7 +154,7 @@
},
"spec": {
"strategy": {
- "type": "Rolling"
+ "type": "Recreate"
},
"triggers": [
{
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/pvc.yml b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/pvc.yml
new file mode 100644
index 000000000..0bbb8e625
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/pvc.yml
@@ -0,0 +1,49 @@
+apiVersion: v1
+kind: Template
+metadata:
+ name: "amp-pvc"
+objects:
+
+- apiVersion: "v1"
+ kind: "PersistentVolumeClaim"
+ metadata:
+ name: "system-storage"
+ spec:
+ accessModes:
+ - "ReadWriteMany"
+ resources:
+ requests:
+ storage: "100Mi"
+
+- apiVersion: "v1"
+ kind: "PersistentVolumeClaim"
+ metadata:
+ name: "mysql-storage"
+ spec:
+ accessModes:
+ - "ReadWriteOnce"
+ resources:
+ requests:
+ storage: "1Gi"
+
+- apiVersion: "v1"
+ kind: "PersistentVolumeClaim"
+ metadata:
+ name: "system-redis-storage"
+ spec:
+ accessModes:
+ - "ReadWriteOnce"
+ resources:
+ requests:
+ storage: "1Gi"
+
+- apiVersion: "v1"
+ kind: "PersistentVolumeClaim"
+ metadata:
+ name: "backend-redis-storage"
+ spec:
+ accessModes:
+ - "ReadWriteOnce"
+ resources:
+ requests:
+ storage: "1Gi"
diff --git a/roles/openshift_examples/files/examples/v3.6/quickstart-templates/wildcard.yml b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/wildcard.yml
new file mode 100644
index 000000000..00dedecd5
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.6/quickstart-templates/wildcard.yml
@@ -0,0 +1,158 @@
+apiVersion: v1
+kind: Template
+metadata:
+ name: "amp-apicast-wildcard-router"
+objects:
+
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: apicast-router
+ spec:
+ replicas: 1
+ selector:
+ deploymentconfig: apicast-router
+ strategy:
+ type: Rolling
+ template:
+ metadata:
+ labels:
+ deploymentconfig: apicast-router
+ spec:
+ volumes:
+ - name: apicast-router-config
+ configMap:
+ name: apicast-router-config
+ items:
+ - key: router.conf
+ path: router.conf
+ containers:
+ - env:
+ - name: APICAST_CONFIGURATION_LOADER
+ value: "lazy"
+ - name: APICAST_CONFIGURATION_CACHE
+ value: "0"
+ image: 3scale-amp20/apicast-gateway:1.0-3
+ imagePullPolicy: IfNotPresent
+ name: apicast-router
+ command: ['bin/apicast']
+ livenessProbe:
+ tcpSocket:
+ port: router
+ initialDelaySeconds: 5
+ timeoutSeconds: 5
+ periodSeconds: 10
+ readinessProbe:
+ httpGet:
+ path: /status/ready
+ port: management
+ initialDelaySeconds: 5
+ timeoutSeconds: 5
+ periodSeconds: 30
+ volumeMounts:
+ - name: apicast-router-config
+ mountPath: /opt/app-root/src/sites.d/
+ readOnly: true
+ ports:
+ - containerPort: 8082
+ name: router
+ protocol: TCP
+ - containerPort: 8090
+ name: management
+ protocol: TCP
+ triggers:
+ - type: ConfigChange
+
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: apicast-router
+ spec:
+ ports:
+ - name: router
+ port: 80
+ protocol: TCP
+ targetPort: router
+ selector:
+ deploymentconfig: apicast-router
+
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: apicast-router-config
+ data:
+ router.conf: |-
+ upstream wildcard {
+ server 0.0.0.1:1;
+
+ balancer_by_lua_block {
+ local round_robin = require 'resty.balancer.round_robin'
+ local balancer = round_robin.new()
+ local peers = balancer:peers(ngx.ctx.apicast)
+
+ local peer, err = balancer:set_peer(peers)
+
+ if not peer then
+ ngx.status = ngx.HTTP_SERVICE_UNAVAILABLE
+ ngx.log(ngx.ERR, "failed to set current backend peer: ", err)
+ ngx.exit(ngx.status)
+ end
+ }
+
+ keepalive 1024;
+ }
+
+ server {
+ listen 8082;
+ server_name ~-(?<apicast>apicast-(staging|production))\.;
+ access_log /dev/stdout combined;
+
+ location / {
+ access_by_lua_block {
+ local resolver = require('resty.resolver'):instance()
+ local servers = resolver:get_servers(ngx.var.apicast, { port = 8080 })
+
+ if #servers == 0 then
+ ngx.status = ngx.HTTP_BAD_GATEWAY
+ ngx.exit(ngx.HTTP_OK)
+ end
+
+ ngx.ctx.apicast = servers
+ }
+ proxy_http_version 1.1;
+ proxy_pass $scheme://wildcard;
+ proxy_set_header Host $host;
+ proxy_set_header Connection "";
+ }
+ }
+
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: apicast-wildcard-router
+ labels:
+ app: apicast-wildcard-router
+ spec:
+ host: apicast-${TENANT_NAME}.${WILDCARD_DOMAIN}
+ to:
+ kind: Service
+ name: apicast-router
+ port:
+ targetPort: router
+ wildcardPolicy: Subdomain
+ tls:
+ termination: edge
+ insecureEdgeTerminationPolicy: Allow
+
+parameters:
+- name: AMP_RELEASE
+ description: "AMP release tag."
+ value: 2.0.0-CR2-redhat-1
+ required: true
+- name: WILDCARD_DOMAIN
+  description: Root domain for the wildcard routes. E.g. example.com will generate 3scale-admin.example.com.
+ required: true
+- name: TENANT_NAME
+ description: "Domain name under the root that Admin UI will be available with -admin suffix."
+ required: true
+ value: "3scale"
diff --git a/roles/openshift_excluder/tasks/disable.yml b/roles/openshift_excluder/tasks/disable.yml
index 8d5a08874..5add25b45 100644
--- a/roles/openshift_excluder/tasks/disable.yml
+++ b/roles/openshift_excluder/tasks/disable.yml
@@ -5,10 +5,12 @@
include: verify_upgrade.yml
# unexclude the current openshift/origin-excluder if it is installed so it can be updated
-- name: Disable OpenShift excluder so it can be updated
+- name: Disable excluders before the upgrade to remove older exclude entries
include: unexclude.yml
vars:
- unexclude_docker_excluder: false
+ # before the docker excluder can be updated, it needs to be disabled
+    # so that stale exclude entries from the older version are cleared
+ unexclude_docker_excluder: "{{ r_openshift_excluder_enable_docker_excluder }}"
unexclude_openshift_excluder: "{{ r_openshift_excluder_enable_openshift_excluder }}"
# Install any excluder that is enabled
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 514c06500..cfe092a28 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -193,8 +193,7 @@ def hostname_valid(hostname):
"""
if (not hostname or
hostname.startswith('localhost') or
- hostname.endswith('localdomain') or
- hostname.endswith('novalocal')):
+ hostname.endswith('localdomain')):
return False
return True
@@ -1041,10 +1040,13 @@ def set_sdn_facts_if_unset(facts, system_facts):
def set_nodename(facts):
""" set nodename """
if 'node' in facts and 'common' in facts:
- if 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'openstack':
- facts['node']['nodename'] = facts['provider']['metadata']['hostname'].replace('.novalocal', '')
- elif 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'gce':
+ if 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'gce':
facts['node']['nodename'] = facts['provider']['metadata']['instance']['hostname'].split('.')[0]
+
+        # TODO: The openstack cloudprovider nodename setting was too opinionated.
+ # It needs to be generalized before it can be enabled again.
+ # elif 'cloudprovider' in facts and facts['cloudprovider']['kind'] == 'openstack':
+ # facts['node']['nodename'] = facts['provider']['metadata']['hostname'].replace('.novalocal', '')
else:
facts['node']['nodename'] = facts['common']['hostname'].lower()
return facts
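
For illustration, a short sketch of how the revised facts helpers above now behave (hostnames hypothetical):

    hostname_valid('node1.novalocal')        # now True: the novalocal exclusion was dropped
    hostname_valid('localhost.localdomain')  # still False
    # set_nodename() now special-cases only GCE; every other provider falls back to
    # facts['common']['hostname'].lower(), since the openstack handling is commented out.
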
diff --git a/roles/openshift_health_checker/action_plugins/openshift_health_check.py b/roles/openshift_health_checker/action_plugins/openshift_health_check.py
index 03c40b78b..a62e4331e 100644
--- a/roles/openshift_health_checker/action_plugins/openshift_health_check.py
+++ b/roles/openshift_health_checker/action_plugins/openshift_health_check.py
@@ -25,9 +25,11 @@ class ActionModule(ActionBase):
def run(self, tmp=None, task_vars=None):
result = super(ActionModule, self).run(tmp, task_vars)
+ task_vars = task_vars or {}
- if task_vars is None:
- task_vars = {}
+        # task vars are not reliably available in the callback plugin,
+        # so record any that it will need in the result.
+ result['playbook_context'] = task_vars.get('r_openshift_health_checker_playbook_context')
if "openshift" not in task_vars:
result["failed"] = True
@@ -46,19 +48,27 @@ class ActionModule(ActionBase):
result["checks"] = check_results = {}
+ user_disabled_checks = [
+ check.strip()
+ for check in task_vars.get("openshift_disable_check", "").split(",")
+ ]
+
for check_name in resolved_checks:
display.banner("CHECK [{} : {}]".format(check_name, task_vars["ansible_host"]))
check = known_checks[check_name]
- if check.is_active(task_vars):
+ if not check.is_active(task_vars):
+ r = dict(skipped=True, skipped_reason="Not active for this host")
+ elif check_name in user_disabled_checks:
+ r = dict(skipped=True, skipped_reason="Disabled by user request")
+ else:
try:
r = check.run(tmp, task_vars)
except OpenShiftCheckException as e:
- r = {}
- r["failed"] = True
- r["msg"] = str(e)
- else:
- r = {"skipped": True}
+ r = dict(
+ failed=True,
+ msg=str(e),
+ )
check_results[check_name] = r
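
A minimal sketch of the dispatch above, assuming a user runs ansible-playbook with -e openshift_disable_check=docker_storage,disk_availability (hypothetical check names):

    user_disabled_checks = [
        check.strip()
        for check in "docker_storage,disk_availability".split(",")
    ]  # -> ['docker_storage', 'disk_availability']
    # Each resolved check then takes exactly one branch:
    #   not is_active(task_vars)      -> skipped: "Not active for this host"
    #   name in user_disabled_checks  -> skipped: "Disabled by user request"
    #   otherwise                     -> run(); an OpenShiftCheckException becomes failed + msg
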
diff --git a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
index 7bce7f107..64c29a8d9 100644
--- a/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
+++ b/roles/openshift_health_checker/callback_plugins/zz_failure_summary.py
@@ -2,6 +2,12 @@
Ansible callback plugin.
'''
+# Reason: In several locations below we disable pylint protected-access
+# for Ansible objects that do not give us any public way
+# to access the full details we need to report check failures.
+# Status: disabled permanently or until the Ansible objects gain a public API.
+# This does leave the code more likely to be broken by future Ansible changes.
+
from pprint import pformat
from ansible.plugins.callback import CallbackBase
@@ -20,38 +26,37 @@ class CallbackModule(CallbackBase):
CALLBACK_TYPE = 'aggregate'
CALLBACK_NAME = 'failure_summary'
CALLBACK_NEEDS_WHITELIST = False
+ _playbook_file = None
def __init__(self):
super(CallbackModule, self).__init__()
self.__failures = []
+ def v2_playbook_on_start(self, playbook):
+ super(CallbackModule, self).v2_playbook_on_start(playbook)
+ # re: playbook attrs see top comment # pylint: disable=protected-access
+ self._playbook_file = playbook._file_name
+
def v2_runner_on_failed(self, result, ignore_errors=False):
super(CallbackModule, self).v2_runner_on_failed(result, ignore_errors)
self.__failures.append(dict(result=result, ignore_errors=ignore_errors))
def v2_playbook_on_stats(self, stats):
super(CallbackModule, self).v2_playbook_on_stats(stats)
- # TODO: update condition to consider a host var or env var to
- # enable/disable the summary, so that we can control the output from a
- # play.
if self.__failures:
- self._print_failure_summary()
+ self._print_failure_details(self.__failures)
- def _print_failure_summary(self):
- '''Print a summary of failed tasks (including ignored failures).'''
+ def _print_failure_details(self, failures):
+ '''Print a summary of failed tasks or checks.'''
self._display.display(u'\nFailure summary:\n')
- # TODO: group failures by host or by task. If grouped by host, it is
- # easy to see all problems of a given host. If grouped by task, it is
- # easy to see what hosts needs the same fix.
-
- width = len(str(len(self.__failures)))
+ width = len(str(len(failures)))
initial_indent_format = u' {{:>{width}}}. '.format(width=width)
initial_indent_len = len(initial_indent_format.format(0))
subsequent_indent = u' ' * initial_indent_len
subsequent_extra_indent = u' ' * (initial_indent_len + 10)
- for i, failure in enumerate(self.__failures, 1):
+ for i, failure in enumerate(failures, 1):
entries = _format_failure(failure)
self._display.display(u'\n{}{}'.format(initial_indent_format.format(i), entries[0]))
for entry in entries[1:]:
@@ -59,11 +64,52 @@ class CallbackModule(CallbackBase):
indented = u'{}{}'.format(subsequent_indent, entry)
self._display.display(indented)
-
-# Reason: disable pylint protected-access because we need to access _*
-# attributes of a task result to implement this method.
-# Status: permanently disabled unless Ansible's API changes.
-# pylint: disable=protected-access
+ failed_checks = set()
+ playbook_context = None
+ # re: result attrs see top comment # pylint: disable=protected-access
+ for failure in failures:
+ # get context from check task result since callback plugins cannot access task vars
+ playbook_context = playbook_context or failure['result']._result.get('playbook_context')
+ failed_checks.update(
+ name
+ for name, result in failure['result']._result.get('checks', {}).items()
+ if result.get('failed')
+ )
+ if failed_checks:
+ self._print_check_failure_summary(failed_checks, playbook_context)
+
+ def _print_check_failure_summary(self, failed_checks, context):
+ checks = ','.join(sorted(failed_checks))
+ # NOTE: context is not set if all failures occurred prior to checks task
+ summary = (
+ '\n'
+ 'The execution of "{playbook}"\n'
+ 'includes checks designed to fail early if the requirements\n'
+ 'of the playbook are not met. One or more of these checks\n'
+ 'failed. To disregard these results, you may choose to\n'
+ 'disable failing checks by setting an Ansible variable:\n\n'
+ ' openshift_disable_check={checks}\n\n'
+ 'Failing check names are shown in the failure details above.\n'
+ 'Some checks may be configurable by variables if your requirements\n'
+ 'are different from the defaults; consult check documentation.\n'
+ 'Variables can be set in the inventory or passed on the\n'
+ 'command line using the -e flag to ansible-playbook.\n'
+ ).format(playbook=self._playbook_file, checks=checks)
+ if context in ['pre-install', 'health']:
+ summary = (
+ '\n'
+ 'You may choose to configure or disable failing checks by\n'
+ 'setting Ansible variables. To disable those above:\n\n'
+ ' openshift_disable_check={checks}\n\n'
+ 'Consult check documentation for configurable variables.\n'
+ 'Variables can be set in the inventory or passed on the\n'
+ 'command line using the -e flag to ansible-playbook.\n'
+ ).format(checks=checks)
+ # other expected contexts: install, upgrade
+ self._display.display(summary)
+
+
+# re: result attrs see top comment # pylint: disable=protected-access
def _format_failure(failure):
'''Return a list of pretty-formatted text entries describing a failure, including
relevant information about it. Expect that the list of text entries will be joined
@@ -100,11 +146,8 @@ def _format_failed_checks(checks):
return stringc(pformat(checks), C.COLOR_ERROR)
-# Reason: disable pylint protected-access because we need to access _*
-# attributes of obj to implement this function.
-# This is inspired by ansible.playbook.base.Base.dump_me.
-# Status: permanently disabled unless Ansible's API changes.
-# pylint: disable=protected-access
+# This is inspired by ansible.playbook.base.Base.dump_me.
+# re: play/task/block attrs see top comment # pylint: disable=protected-access
def _get_play(obj):
'''Given a task or block, recursively tries to find its parent play.'''
if hasattr(obj, '_play'):
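
To make the two summary variants above concrete: for a hypothetical run with playbook_context 'health' and failed checks 'curator' and 'elasticsearch', the shorter variant renders as:

    You may choose to configure or disable failing checks by
    setting Ansible variables. To disable those above:

        openshift_disable_check=curator,elasticsearch

    Consult check documentation for configurable variables.
    Variables can be set in the inventory or passed on the
    command line using the -e flag to ansible-playbook.

For 'install' and 'upgrade' contexts (or when no context was recorded), the longer variant naming the playbook file is shown instead.
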
diff --git a/roles/openshift_health_checker/library/aos_version.py b/roles/openshift_health_checker/library/aos_version.py
index 4460ec324..4c205e48c 100755
--- a/roles/openshift_health_checker/library/aos_version.py
+++ b/roles/openshift_health_checker/library/aos_version.py
@@ -16,8 +16,6 @@ of release availability already. Without duplicating all that, we would
like the user to have a helpful error message if we detect things will
not work out right. Note that if openshift_release is not specified in
the inventory, the version comparison checks just pass.
-
-TODO: fail gracefully on non-yum systems (dnf in Fedora)
'''
from ansible.module_utils.basic import AnsibleModule
@@ -26,7 +24,7 @@ IMPORT_EXCEPTION = None
try:
import yum # pylint: disable=import-error
except ImportError as err:
- IMPORT_EXCEPTION = err # in tox test env, yum import fails
+ IMPORT_EXCEPTION = err
class AosVersionException(Exception):
@@ -37,12 +35,10 @@ class AosVersionException(Exception):
def main():
- '''Entrypoint for this Ansible module'''
+ """Entrypoint for this Ansible module"""
module = AnsibleModule(
argument_spec=dict(
- requested_openshift_release=dict(type="str", default=''),
- openshift_deployment_type=dict(required=True),
- rpm_prefix=dict(required=True), # atomic-openshift, origin, ...?
+ package_list=dict(type="list", required=True),
),
supports_check_mode=True
)
@@ -51,32 +47,37 @@ def main():
module.fail_json(msg="aos_version module could not import yum: %s" % IMPORT_EXCEPTION)
# determine the packages we will look for
- rpm_prefix = module.params['rpm_prefix']
- if not rpm_prefix:
- module.fail_json(msg="rpm_prefix must not be empty")
- expected_pkgs = set([
- rpm_prefix,
- rpm_prefix + '-master',
- rpm_prefix + '-node',
- ])
-
- # determine what level of precision the user specified for the openshift version.
- # should look like a version string with possibly many segments e.g. "3.4.1":
- requested_openshift_release = module.params['requested_openshift_release']
+ package_list = module.params['package_list']
+ if not package_list:
+ module.fail_json(msg="package_list must not be empty")
+
+    # generate a list with only the names of expected packages
+ expected_pkg_names = [p["name"] for p in package_list]
+
+ # gather packages that require a multi_minor_release check
+ multi_minor_pkgs = [p for p in package_list if p["check_multi"]]
+
+ # generate list of packages with a specified (non-empty) version
+ # should look like a version string with possibly many segments e.g. "3.4.1"
+ versioned_pkgs = [p for p in package_list if p["version"]]
# get the list of packages available and complain if anything is wrong
try:
- pkgs = _retrieve_available_packages(expected_pkgs)
- if requested_openshift_release:
- _check_precise_version_found(pkgs, expected_pkgs, requested_openshift_release)
- _check_higher_version_found(pkgs, expected_pkgs, requested_openshift_release)
- if module.params['openshift_deployment_type'] in ['openshift-enterprise']:
- _check_multi_minor_release(pkgs, expected_pkgs)
+ pkgs = _retrieve_available_packages(expected_pkg_names)
+ if versioned_pkgs:
+ _check_precise_version_found(pkgs, _to_dict(versioned_pkgs))
+ _check_higher_version_found(pkgs, _to_dict(versioned_pkgs))
+ if multi_minor_pkgs:
+ _check_multi_minor_release(pkgs, _to_dict(multi_minor_pkgs))
except AosVersionException as excinfo:
module.fail_json(msg=str(excinfo))
module.exit_json(changed=False)
+def _to_dict(pkg_list):
+ return {pkg["name"]: pkg for pkg in pkg_list}
+
+
def _retrieve_available_packages(expected_pkgs):
# search for package versions available for openshift pkgs
yb = yum.YumBase() # pylint: disable=invalid-name
@@ -104,56 +105,60 @@ def _retrieve_available_packages(expected_pkgs):
class PreciseVersionNotFound(AosVersionException):
- '''Exception for reporting packages not available at given release'''
- def __init__(self, requested_release, not_found):
- msg = ['Not all of the required packages are available at requested version %s:' % requested_release]
- msg += [' ' + name for name in not_found]
+ """Exception for reporting packages not available at given version"""
+ def __init__(self, not_found):
+        msg = ['Not all of the required packages are available at their requested version:']
+        msg += ['  {}:{}'.format(pkg["name"], pkg["version"]) for pkg in not_found]
msg += ['Please check your subscriptions and enabled repositories.']
AosVersionException.__init__(self, '\n'.join(msg), not_found)
-def _check_precise_version_found(pkgs, expected_pkgs, requested_openshift_release):
+def _check_precise_version_found(pkgs, expected_pkgs_dict):
# see if any packages couldn't be found at requested release version
     # we would like to verify that the latest available pkgs match the requested version, however precisely it is specified;
     # e.g. if 3.4.1 is requested and a package version 3.4.1.5 exists, the check passes; if only 3.4.0 exists, it fails.
- pkgs_precise_version_found = {}
+ pkgs_precise_version_found = set()
for pkg in pkgs:
- if pkg.name not in expected_pkgs:
+ if pkg.name not in expected_pkgs_dict:
continue
# does the version match, to the precision requested?
# and, is it strictly greater, at the precision requested?
- match_version = '.'.join(pkg.version.split('.')[:requested_openshift_release.count('.') + 1])
- if match_version == requested_openshift_release:
- pkgs_precise_version_found[pkg.name] = True
+ expected_pkg_version = expected_pkgs_dict[pkg.name]["version"]
+ match_version = '.'.join(pkg.version.split('.')[:expected_pkg_version.count('.') + 1])
+ if match_version == expected_pkg_version:
+ pkgs_precise_version_found.add(pkg.name)
not_found = []
- for name in expected_pkgs:
+ for name, pkg in expected_pkgs_dict.items():
if name not in pkgs_precise_version_found:
- not_found.append(name)
+ not_found.append(pkg)
if not_found:
- raise PreciseVersionNotFound(requested_openshift_release, not_found)
+ raise PreciseVersionNotFound(not_found)
class FoundHigherVersion(AosVersionException):
- '''Exception for reporting that a higher version than requested is available'''
- def __init__(self, requested_release, higher_found):
+ """Exception for reporting that a higher version than requested is available"""
+ def __init__(self, higher_found):
msg = ['Some required package(s) are available at a version',
- 'that is higher than requested %s:' % requested_release]
+ 'that is higher than requested']
msg += [' ' + name for name in higher_found]
msg += ['This will prevent installing the version you requested.']
msg += ['Please check your enabled repositories or adjust openshift_release.']
AosVersionException.__init__(self, '\n'.join(msg), higher_found)
-def _check_higher_version_found(pkgs, expected_pkgs, requested_openshift_release):
- req_release_arr = [int(segment) for segment in requested_openshift_release.split(".")]
+def _check_higher_version_found(pkgs, expected_pkgs_dict):
+ expected_pkg_names = list(expected_pkgs_dict)
+
# see if any packages are available in a version higher than requested
higher_version_for_pkg = {}
for pkg in pkgs:
- if pkg.name not in expected_pkgs:
+ if pkg.name not in expected_pkg_names:
continue
+ expected_pkg_version = expected_pkgs_dict[pkg.name]["version"]
+ req_release_arr = [int(segment) for segment in expected_pkg_version.split(".")]
version = [int(segment) for segment in pkg.version.split(".")]
too_high = version[:len(req_release_arr)] > req_release_arr
higher_than_seen = version > higher_version_for_pkg.get(pkg.name, [])
@@ -164,11 +169,11 @@ def _check_higher_version_found(pkgs, expected_pkgs, requested_openshift_release
higher_found = []
for name, version in higher_version_for_pkg.items():
higher_found.append(name + '-' + '.'.join(str(segment) for segment in version))
- raise FoundHigherVersion(requested_openshift_release, higher_found)
+ raise FoundHigherVersion(higher_found)
class FoundMultiRelease(AosVersionException):
- '''Exception for reporting multiple minor releases found for same package'''
+ """Exception for reporting multiple minor releases found for same package"""
def __init__(self, multi_found):
msg = ['Multiple minor versions of these packages are available']
msg += [' ' + name for name in multi_found]
@@ -176,18 +181,18 @@ class FoundMultiRelease(AosVersionException):
AosVersionException.__init__(self, '\n'.join(msg), multi_found)
-def _check_multi_minor_release(pkgs, expected_pkgs):
+def _check_multi_minor_release(pkgs, expected_pkgs_dict):
# see if any packages are available in more than one minor version
pkgs_by_name_version = {}
for pkg in pkgs:
# keep track of x.y (minor release) versions seen
minor_release = '.'.join(pkg.version.split('.')[:2])
if pkg.name not in pkgs_by_name_version:
- pkgs_by_name_version[pkg.name] = {}
- pkgs_by_name_version[pkg.name][minor_release] = True
+ pkgs_by_name_version[pkg.name] = set()
+ pkgs_by_name_version[pkg.name].add(minor_release)
multi_found = []
- for name in expected_pkgs:
+ for name in expected_pkgs_dict:
if name in pkgs_by_name_version and len(pkgs_by_name_version[name]) > 1:
multi_found.append(name)
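
A minimal sketch of the reworked module interface above; the entries are hypothetical, but the keys (name, version, check_multi) are the ones the code consumes:

    package_list = [
        {"name": "origin", "version": "3.6", "check_multi": False},
        {"name": "origin-master", "version": "3.6", "check_multi": False},
        {"name": "origin-node", "version": "", "check_multi": True},
    ]
    expected = _to_dict(package_list)   # {"origin": {...}, "origin-master": {...}, ...}
    # Only entries with a non-empty "version" go through the precise/higher version checks;
    # only entries with check_multi set go through _check_multi_minor_release().
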
diff --git a/roles/openshift_health_checker/library/ocutil.py b/roles/openshift_health_checker/library/ocutil.py
new file mode 100644
index 000000000..2e60735d6
--- /dev/null
+++ b/roles/openshift_health_checker/library/ocutil.py
@@ -0,0 +1,74 @@
+#!/usr/bin/python
+"""Interface to OpenShift oc command"""
+
+import os
+import shlex
+import shutil
+import subprocess
+
+from ansible.module_utils.basic import AnsibleModule
+
+
+ADDITIONAL_PATH_LOOKUPS = ['/usr/local/bin', os.path.expanduser('~/bin')]
+
+
+def locate_oc_binary():
+ """Find and return oc binary file"""
+ # https://github.com/openshift/openshift-ansible/issues/3410
+ # oc can be in /usr/local/bin in some cases, but that may not
+ # be in $PATH due to ansible/sudo
+ paths = os.environ.get("PATH", os.defpath).split(os.pathsep) + ADDITIONAL_PATH_LOOKUPS
+
+ oc_binary = 'oc'
+
+    # Use shutil.which if it is available, otherwise fall back to a naive path search
+ try:
+ which_result = shutil.which(oc_binary, path=os.pathsep.join(paths))
+ if which_result is not None:
+ oc_binary = which_result
+ except AttributeError:
+ for path in paths:
+ if os.path.exists(os.path.join(path, oc_binary)):
+ oc_binary = os.path.join(path, oc_binary)
+ break
+
+ return oc_binary
+
+
+def main():
+ """Module that executes commands on a remote OpenShift cluster"""
+
+ module = AnsibleModule(
+ argument_spec=dict(
+ namespace=dict(type="str", required=True),
+ config_file=dict(type="str", required=True),
+ cmd=dict(type="str", required=True),
+ extra_args=dict(type="list", default=[]),
+ ),
+ )
+
+ cmd = [
+ locate_oc_binary(),
+ '--config', module.params["config_file"],
+ '-n', module.params["namespace"],
+ ] + shlex.split(module.params["cmd"])
+
+ failed = True
+ try:
+ cmd_result = subprocess.check_output(list(cmd), stderr=subprocess.STDOUT)
+ failed = False
+ except subprocess.CalledProcessError as exc:
+ cmd_result = '[rc {}] {}\n{}'.format(exc.returncode, ' '.join(exc.cmd), exc.output)
+ except OSError as exc:
+ # we get this when 'oc' is not there
+ cmd_result = str(exc)
+
+ module.exit_json(
+ changed=False,
+ failed=failed,
+ result=cmd_result,
+ )
+
+
+if __name__ == '__main__':
+ main()
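
For example, the command list assembled above would expand as follows for a hypothetical invocation (namespace 'logging', cmd 'get pods -o json'; the kubeconfig path is illustrative):

    import shlex

    cmd = [
        locate_oc_binary(),   # e.g. '/usr/local/bin/oc' when found outside $PATH
        '--config', '/etc/origin/master/admin.kubeconfig',
        '-n', 'logging',
    ] + shlex.split('get pods -o json')
    # -> ['/usr/local/bin/oc', '--config', '/etc/origin/master/admin.kubeconfig',
    #     '-n', 'logging', 'get', 'pods', '-o', 'json']
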
diff --git a/roles/openshift_health_checker/library/rpm_version.py b/roles/openshift_health_checker/library/rpm_version.py
new file mode 100644
index 000000000..8ea223055
--- /dev/null
+++ b/roles/openshift_health_checker/library/rpm_version.py
@@ -0,0 +1,127 @@
+#!/usr/bin/python
+"""
+Ansible module for rpm-based systems that determines installed package version information on a host.
+"""
+
+from ansible.module_utils.basic import AnsibleModule
+
+IMPORT_EXCEPTION = None
+try:
+ import rpm # pylint: disable=import-error
+except ImportError as err:
+ IMPORT_EXCEPTION = err # in tox test env, rpm import fails
+
+
+class RpmVersionException(Exception):
+ """Base exception class for package version problems"""
+ def __init__(self, message, problem_pkgs=None):
+ Exception.__init__(self, message)
+ self.problem_pkgs = problem_pkgs
+
+
+def main():
+ """Entrypoint for this Ansible module"""
+ module = AnsibleModule(
+ argument_spec=dict(
+ package_list=dict(type="list", required=True),
+ ),
+ supports_check_mode=True
+ )
+
+ if IMPORT_EXCEPTION:
+ module.fail_json(msg="rpm_version module could not import rpm: %s" % IMPORT_EXCEPTION)
+
+ # determine the packages we will look for
+ pkg_list = module.params['package_list']
+ if not pkg_list:
+ module.fail_json(msg="package_list must not be empty")
+
+    # get the list of installed package versions and complain if any
+    # expected packages are missing or if any errors occur
+ try:
+ pkg_versions = _retrieve_expected_pkg_versions(_to_dict(pkg_list))
+ _check_pkg_versions(pkg_versions, _to_dict(pkg_list))
+ except RpmVersionException as excinfo:
+ module.fail_json(msg=str(excinfo))
+ module.exit_json(changed=False)
+
+
+def _to_dict(pkg_list):
+ return {pkg["name"]: pkg for pkg in pkg_list}
+
+
+def _retrieve_expected_pkg_versions(expected_pkgs_dict):
+ """Search for installed packages matching given pkg names
+ and versions. Returns a dictionary: {pkg_name: [versions]}"""
+
+ transaction = rpm.TransactionSet()
+ pkgs = {}
+
+ for pkg_name in expected_pkgs_dict:
+ matched_pkgs = transaction.dbMatch("name", pkg_name)
+ if not matched_pkgs:
+ continue
+
+ for header in matched_pkgs:
+ if header['name'] == pkg_name:
+ if pkg_name not in pkgs:
+ pkgs[pkg_name] = []
+
+ pkgs[pkg_name].append(header['version'])
+
+ return pkgs
+
+
+def _check_pkg_versions(found_pkgs_dict, expected_pkgs_dict):
+ invalid_pkg_versions = {}
+ not_found_pkgs = []
+
+ for pkg_name, pkg in expected_pkgs_dict.items():
+ if not found_pkgs_dict.get(pkg_name):
+ not_found_pkgs.append(pkg_name)
+ continue
+
+ found_versions = [_parse_version(version) for version in found_pkgs_dict[pkg_name]]
+ expected_version = _parse_version(pkg["version"])
+ if expected_version not in found_versions:
+ invalid_pkg_versions[pkg_name] = {
+ "found_versions": found_versions,
+ "required_version": expected_version,
+ }
+
+ if not_found_pkgs:
+ raise RpmVersionException(
+ '\n'.join([
+ "The following packages were not found to be installed: {}".format('\n '.join([
+ "{}".format(pkg)
+ for pkg in not_found_pkgs
+ ]))
+ ]),
+ not_found_pkgs,
+ )
+
+ if invalid_pkg_versions:
+ raise RpmVersionException(
+ '\n '.join([
+ "The following packages were found to be installed with an incorrect version: {}".format('\n'.join([
+ " \n{}\n Required version: {}\n Found versions: {}".format(
+ pkg_name,
+ pkg["required_version"],
+ ', '.join([version for version in pkg["found_versions"]]))
+ for pkg_name, pkg in invalid_pkg_versions.items()
+ ]))
+ ]),
+ invalid_pkg_versions,
+ )
+
+
+def _parse_version(version_str):
+ segs = version_str.split('.')
+ if not segs or len(segs) <= 2:
+ return version_str
+
+ return '.'.join(segs[0:2])
+
+
+if __name__ == '__main__':
+ main()
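
_parse_version() above normalizes versions to minor-release precision before comparing; a quick sketch:

    _parse_version("3.6.173.0.5")  # -> "3.6"   (more than two segments: truncate to major.minor)
    _parse_version("3.6")          # -> "3.6"   (two segments or fewer: returned unchanged)
    # so a required version of "3.6" matches any installed 3.6.z build of the package
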
diff --git a/roles/openshift_health_checker/meta/main.yml b/roles/openshift_health_checker/meta/main.yml
index cd9b55902..4d141974c 100644
--- a/roles/openshift_health_checker/meta/main.yml
+++ b/roles/openshift_health_checker/meta/main.yml
@@ -2,3 +2,4 @@
dependencies:
- role: openshift_facts
- role: openshift_repos
+ - role: openshift_version
diff --git a/roles/openshift_health_checker/openshift_checks/__init__.py b/roles/openshift_health_checker/openshift_checks/__init__.py
index be63d864a..5c9949ced 100644
--- a/roles/openshift_health_checker/openshift_checks/__init__.py
+++ b/roles/openshift_health_checker/openshift_checks/__init__.py
@@ -66,16 +66,26 @@ class OpenShiftCheck(object):
LOADER_EXCLUDES = (
"__init__.py",
"mixins.py",
+ "logging.py",
)
-def load_checks():
+def load_checks(path=None, subpkg=""):
"""Dynamically import all check modules for the side effect of registering checks."""
- return [
- import_module(__package__ + "." + name[:-3])
- for name in os.listdir(os.path.dirname(__file__))
- if name.endswith(".py") and name not in LOADER_EXCLUDES
- ]
+ if path is None:
+ path = os.path.dirname(__file__)
+
+ modules = []
+
+ for name in os.listdir(path):
+ if os.path.isdir(os.path.join(path, name)):
+ modules = modules + load_checks(os.path.join(path, name), subpkg + "." + name)
+ continue
+
+ if name.endswith(".py") and name not in LOADER_EXCLUDES:
+ modules.append(import_module(__package__ + subpkg + "." + name[:-3]))
+
+ return modules
def get_var(task_vars, *keys, **kwargs):
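
With the recursive loader above, a layout like the logging checks added later in this patch imports as (paths illustrative):

    # openshift_checks/disk_availability.py  -> openshift_checks.disk_availability
    # openshift_checks/logging/curator.py    -> openshift_checks.logging.curator
    #   (subpkg accumulates '.logging' on the recursive call)
    # any file named logging.py is skipped via LOADER_EXCLUDES
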
diff --git a/roles/openshift_health_checker/openshift_checks/disk_availability.py b/roles/openshift_health_checker/openshift_checks/disk_availability.py
index c2792a0fe..962148cb8 100644
--- a/roles/openshift_health_checker/openshift_checks/disk_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/disk_availability.py
@@ -27,10 +27,12 @@ class DiskAvailability(NotContainerizedMixin, OpenShiftCheck):
def run(self, tmp, task_vars):
group_names = get_var(task_vars, "group_names")
ansible_mounts = get_var(task_vars, "ansible_mounts")
-
- min_free_bytes = max(self.recommended_disk_space_bytes.get(name, 0) for name in group_names)
free_bytes = self.openshift_available_disk(ansible_mounts)
+ recommended_min = max(self.recommended_disk_space_bytes.get(name, 0) for name in group_names)
+ configured_min = int(get_var(task_vars, "openshift_check_min_host_disk_gb", default=0)) * 10**9
+ min_free_bytes = configured_min or recommended_min
+
if free_bytes < min_free_bytes:
return {
'failed': True,
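
A worked example of the new override (hypothetical numbers): for a host whose group recommendation is 40 GB but whose inventory sets openshift_check_min_host_disk_gb=10:

    recommended_min = 40 * 10**9                # max() over the host's group recommendations
    configured_min = int("10") * 10**9          # from openshift_check_min_host_disk_gb
    min_free_bytes = configured_min or recommended_min   # -> 10 GB; unset (0) falls back to 40 GB
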
diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
index cce289b95..27e6fe383 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
@@ -1,8 +1,9 @@
# pylint: disable=missing-docstring
from openshift_checks import OpenShiftCheck, get_var
+from openshift_checks.mixins import DockerHostMixin
-class DockerImageAvailability(OpenShiftCheck):
+class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
"""Check that required Docker images are available.
This check attempts to ensure that required docker images are
@@ -13,41 +14,47 @@ class DockerImageAvailability(OpenShiftCheck):
name = "docker_image_availability"
tags = ["preflight"]
- skopeo_image = "openshift/openshift-ansible"
+ dependencies = ["skopeo", "python-docker-py"]
- # FIXME(juanvallejo): we should consider other possible values of
- # `deployment_type` (the key here). See
- # https://github.com/openshift/openshift-ansible/blob/8e26f8c/roles/openshift_repos/vars/main.yml#L7
- docker_image_base = {
+ deployment_image_info = {
"origin": {
- "repo": "openshift",
- "image": "origin",
+ "namespace": "openshift",
+ "name": "origin",
},
"openshift-enterprise": {
- "repo": "openshift3",
- "image": "ose",
+ "namespace": "openshift3",
+ "name": "ose",
},
}
- def run(self, tmp, task_vars):
- required_images = self.required_images(task_vars)
- missing_images = set(required_images) - set(self.local_images(required_images, task_vars))
-
- # exit early if all images were found locally
- if not missing_images:
- return {"changed": False}
+ @classmethod
+ def is_active(cls, task_vars):
+ """Skip hosts with unsupported deployment types."""
+ deployment_type = get_var(task_vars, "openshift_deployment_type")
+ has_valid_deployment_type = deployment_type in cls.deployment_image_info
- msg, failed, changed = self.update_skopeo_image(task_vars)
+ return super(DockerImageAvailability, cls).is_active(task_vars) and has_valid_deployment_type
- # exit early if Skopeo update fails
+ def run(self, tmp, task_vars):
+ msg, failed, changed = self.ensure_dependencies(task_vars)
if failed:
return {
"failed": True,
"changed": changed,
- "msg": "Failed to update Skopeo image ({img_name}). {msg}".format(img_name=self.skopeo_image, msg=msg),
+ "msg": "Some dependencies are required in order to check Docker image availability.\n" + msg
}
+ required_images = self.required_images(task_vars)
+ missing_images = set(required_images) - set(self.local_images(required_images, task_vars))
+
+ # exit early if all images were found locally
+ if not missing_images:
+ return {"changed": changed}
+
registries = self.known_docker_registries(task_vars)
+ if not registries:
+ return {"failed": True, "msg": "Unable to retrieve any docker registries.", "changed": changed}
+
available_images = self.available_images(missing_images, registries, task_vars)
unavailable_images = set(missing_images) - set(available_images)
@@ -55,44 +62,60 @@ class DockerImageAvailability(OpenShiftCheck):
return {
"failed": True,
"msg": (
- "One or more required images are not available: {}.\n"
+ "One or more required Docker images are not available:\n {}\n"
"Configured registries: {}"
- ).format(", ".join(sorted(unavailable_images)), ", ".join(registries)),
+ ).format(",\n ".join(sorted(unavailable_images)), ", ".join(registries)),
"changed": changed,
}
return {"changed": changed}
def required_images(self, task_vars):
- deployment_type = get_var(task_vars, "deployment_type")
- # FIXME(juanvallejo): we should handle gracefully with a proper error
- # message when given an unexpected value for `deployment_type`.
- image_base_name = self.docker_image_base[deployment_type]
-
- openshift_release = get_var(task_vars, "openshift_release")
- # FIXME(juanvallejo): this variable is not required when the
- # installation is non-containerized. The example inventories have it
- # commented out. We should handle gracefully and with a proper error
- # message when this variable is required and not set.
- openshift_image_tag = get_var(task_vars, "openshift_image_tag")
+ deployment_type = get_var(task_vars, "openshift_deployment_type")
+ image_info = self.deployment_image_info[deployment_type]
+ openshift_release = get_var(task_vars, "openshift_release", default="latest")
+ openshift_image_tag = get_var(task_vars, "openshift_image_tag")
is_containerized = get_var(task_vars, "openshift", "common", "is_containerized")
- if is_containerized:
- images = set(self.containerized_docker_images(image_base_name, openshift_release))
- else:
- images = set(self.rpm_docker_images(image_base_name, openshift_release))
+ images = set(self.required_docker_images(
+ image_info["namespace"],
+ image_info["name"],
+ ["registry-console"] if "enterprise" in deployment_type else [], # include enterprise-only image names
+ openshift_release,
+ is_containerized,
+ ))
# append images with qualified image tags to our list of required images.
# these are images with a (v0.0.0.0) tag, rather than a standard release
# format tag (v0.0). We want to check this set in both containerized and
# non-containerized installations.
images.update(
- self.qualified_docker_images(self.image_from_base_name(image_base_name), "v" + openshift_image_tag)
+ self.required_qualified_docker_images(
+ image_info["namespace"],
+ image_info["name"],
+ openshift_image_tag,
+ ),
)
return images
+ @staticmethod
+ def required_docker_images(namespace, name, additional_image_names, version, is_containerized):
+ if is_containerized:
+ return ["{}/{}:{}".format(namespace, name, version)] if name else []
+
+ # include additional non-containerized images specific to the current deployment type
+ return ["{}/{}:{}".format(namespace, img_name, version) for img_name in additional_image_names]
+
+ @staticmethod
+ def required_qualified_docker_images(namespace, name, version):
+ # pylint: disable=invalid-name
+ return [
+ "{}/{}-{}:{}".format(namespace, name, suffix, version)
+ for suffix in ["haproxy-router", "docker-registry", "deployer", "pod"]
+ ]
+
def local_images(self, images, task_vars):
"""Filter a list of images and return those available locally."""
return [
@@ -107,31 +130,26 @@ class DockerImageAvailability(OpenShiftCheck):
return bool(result.get("images", []))
- def known_docker_registries(self, task_vars):
- result = self.module_executor("docker_info", {}, task_vars)
+ @staticmethod
+ def known_docker_registries(task_vars):
+ docker_facts = get_var(task_vars, "openshift", "docker")
+ regs = set(docker_facts["additional_registries"])
- if result.get("failed", False):
- return []
+ deployment_type = get_var(task_vars, "openshift_deployment_type")
+ if deployment_type == "origin":
+ regs.update(["docker.io"])
+ elif "enterprise" in deployment_type:
+ regs.update(["registry.access.redhat.com"])
- # FIXME(juanvallejo): wrong default type, result["info"] is expected to
- # contain a dictionary (see how we call `docker_info.get` below).
- docker_info = result.get("info", "")
- return [registry.get("Name", "") for registry in docker_info.get("Registries", {})]
+ return list(regs)
def available_images(self, images, registries, task_vars):
"""Inspect existing images using Skopeo and return all images successfully inspected."""
return [
image for image in images
- if self.is_image_available(image, registries, task_vars)
+ if any(self.is_available_skopeo_image(image, registry, task_vars) for registry in registries)
]
- def is_image_available(self, image, registries, task_vars):
- for registry in registries:
- if self.is_available_skopeo_image(image, registry, task_vars):
- return True
-
- return False
-
def is_available_skopeo_image(self, image, registry, task_vars):
"""Uses Skopeo to determine if required image exists in a given registry."""
@@ -140,40 +158,6 @@ class DockerImageAvailability(OpenShiftCheck):
image=image,
)
- args = {
- "name": "skopeo_inspect",
- "image": self.skopeo_image,
- "command": cmd_str,
- "detach": False,
- "cleanup": True,
- }
- result = self.module_executor("docker_container", args, task_vars)
- return result.get("failed", False)
-
- def containerized_docker_images(self, base_name, version):
- return [
- "{image}:{version}".format(image=self.image_from_base_name(base_name), version=version)
- ]
-
- @staticmethod
- def rpm_docker_images(base, version):
- return [
- "{image_repo}/registry-console:{version}".format(image_repo=base["repo"], version=version)
- ]
-
- @staticmethod
- def qualified_docker_images(image_name, version):
- return [
- "{}-{}:{}".format(image_name, component, version)
- for component in "haproxy-router docker-registry deployer pod".split()
- ]
-
- @staticmethod
- def image_from_base_name(base):
- return "".join([base["repo"], "/", base["image"]])
-
- # ensures that the skopeo docker image exists, and updates it
- # with latest if image was already present locally.
- def update_skopeo_image(self, task_vars):
- result = self.module_executor("docker_image", {"name": self.skopeo_image}, task_vars)
- return result.get("msg", ""), result.get("failed", False), result.get("changed", False)
+ args = {"_raw_params": cmd_str}
+ result = self.module_executor("command", args, task_vars)
+ return not result.get("failed", False) and result.get("rc", 0) == 0
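
A worked example of the image names the refactored helpers above produce, assuming an 'origin' deployment with openshift_release 'v3.6' and openshift_image_tag 'v3.6.0' (all values hypothetical):

    DockerImageAvailability.required_docker_images("openshift", "origin", [], "v3.6", True)
    # containerized -> ["openshift/origin:v3.6"]
    DockerImageAvailability.required_qualified_docker_images("openshift", "origin", "v3.6.0")
    # -> ["openshift/origin-haproxy-router:v3.6.0", "openshift/origin-docker-registry:v3.6.0",
    #     "openshift/origin-deployer:v3.6.0", "openshift/origin-pod:v3.6.0"]
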
diff --git a/roles/openshift_health_checker/openshift_checks/docker_storage.py b/roles/openshift_health_checker/openshift_checks/docker_storage.py
new file mode 100644
index 000000000..7f1751b36
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/docker_storage.py
@@ -0,0 +1,185 @@
+"""Check Docker storage driver and usage."""
+import json
+import re
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var
+from openshift_checks.mixins import DockerHostMixin
+
+
+class DockerStorage(DockerHostMixin, OpenShiftCheck):
+ """Check Docker storage driver compatibility.
+
+ This check ensures that Docker is using a supported storage driver,
+ and that loopback is not being used (if using devicemapper).
+    It also checks that storage usage is not above threshold.
+ """
+
+ name = "docker_storage"
+ tags = ["pre-install", "health", "preflight"]
+
+ dependencies = ["python-docker-py"]
+ storage_drivers = ["devicemapper", "overlay2"]
+ max_thinpool_data_usage_percent = 90.0
+ max_thinpool_meta_usage_percent = 90.0
+
+ # pylint: disable=too-many-return-statements
+ # Reason: permanent stylistic exception;
+    # it is clearer to return on failures, and there are simply many ways to fail here.
+ def run(self, tmp, task_vars):
+ msg, failed, changed = self.ensure_dependencies(task_vars)
+ if failed:
+ return {
+ "failed": True,
+ "changed": changed,
+ "msg": "Some dependencies are required in order to query docker storage on host:\n" + msg
+ }
+
+ # attempt to get the docker info hash from the API
+ info = self.execute_module("docker_info", {}, task_vars)
+ if info.get("failed"):
+ return {"failed": True, "changed": changed,
+ "msg": "Failed to query Docker API. Is docker running on this host?"}
+ if not info.get("info"): # this would be very strange
+ return {"failed": True, "changed": changed,
+ "msg": "Docker API query missing info:\n{}".format(json.dumps(info))}
+ info = info["info"]
+
+ # check if the storage driver we saw is valid
+ driver = info.get("Driver", "[NONE]")
+ if driver not in self.storage_drivers:
+ msg = (
+ "Detected unsupported Docker storage driver '{driver}'.\n"
+ "Supported storage drivers are: {drivers}"
+ ).format(driver=driver, drivers=', '.join(self.storage_drivers))
+ return {"failed": True, "changed": changed, "msg": msg}
+
+ # driver status info is a list of tuples; convert to dict and validate based on driver
+ driver_status = {item[0]: item[1] for item in info.get("DriverStatus", [])}
+ if driver == "devicemapper":
+ if driver_status.get("Data loop file"):
+ msg = (
+ "Use of loopback devices with the Docker devicemapper storage driver\n"
+ "(the default storage configuration) is unsupported in production.\n"
+ "Please use docker-storage-setup to configure a backing storage volume.\n"
+ "See http://red.ht/2rNperO for further information."
+ )
+ return {"failed": True, "changed": changed, "msg": msg}
+ result = self._check_dm_usage(driver_status, task_vars)
+ result['changed'] = result.get('changed', False) or changed
+ return result
+
+ # TODO(lmeyer): determine how to check usage for overlay2
+
+ return {"changed": changed}
+
+ def _check_dm_usage(self, driver_status, task_vars):
+ """
+ Backing assumptions: We expect devicemapper to be backed by an auto-expanding thin pool
+ implemented as an LV in an LVM2 VG. This is how docker-storage-setup currently configures
+ devicemapper storage. The LV is "thin" because it does not use all available storage
+ from its VG, instead expanding as needed; so to determine available space, we gather
+ current usage as the Docker API reports for the driver as well as space available for
+ expansion in the pool's VG.
+ Usage within the LV is divided into pools allocated to data and metadata, either of which
+ could run out of space first; so we check both.
+ """
+ vals = dict(
+ vg_free=self._get_vg_free(driver_status.get("Pool Name"), task_vars),
+ data_used=driver_status.get("Data Space Used"),
+ data_total=driver_status.get("Data Space Total"),
+ metadata_used=driver_status.get("Metadata Space Used"),
+ metadata_total=driver_status.get("Metadata Space Total"),
+ )
+
+ # convert all human-readable strings to bytes
+ for key, value in vals.copy().items():
+ try:
+ vals[key + "_bytes"] = self._convert_to_bytes(value)
+ except ValueError as err: # unlikely to hit this from API info, but just to be safe
+ return {
+ "failed": True,
+ "values": vals,
+ "msg": "Could not interpret {} value '{}' as bytes: {}".format(key, value, str(err))
+ }
+
+ # determine the threshold percentages which usage should not exceed
+ for name, default in [("data", self.max_thinpool_data_usage_percent),
+ ("metadata", self.max_thinpool_meta_usage_percent)]:
+ percent = get_var(task_vars, "max_thinpool_" + name + "_usage_percent", default=default)
+ try:
+ vals[name + "_threshold"] = float(percent)
+ except ValueError:
+ return {
+ "failed": True,
+ "msg": "Specified thinpool {} usage limit '{}' is not a percentage".format(name, percent)
+ }
+
+ # test whether the thresholds are exceeded
+ messages = []
+ for name in ["data", "metadata"]:
+ vals[name + "_pct_used"] = 100 * vals[name + "_used_bytes"] / (
+ vals[name + "_total_bytes"] + vals["vg_free_bytes"])
+ if vals[name + "_pct_used"] > vals[name + "_threshold"]:
+ messages.append(
+ "Docker thinpool {name} usage percentage {pct:.1f} "
+ "is higher than threshold {thresh:.1f}.".format(
+ name=name,
+ pct=vals[name + "_pct_used"],
+ thresh=vals[name + "_threshold"],
+ ))
+ vals["failed"] = True
+
+ vals["msg"] = "\n".join(messages or ["Thinpool usage is within thresholds."])
+ return vals
+
+ def _get_vg_free(self, pool, task_vars):
+ # Determine which VG to examine according to the pool name, the only indicator currently
+ # available from the Docker API driver info. We assume a name that looks like
+ # "vg--name-docker--pool"; vg and lv names with inner hyphens doubled, joined by a hyphen.
+ match = re.match(r'((?:[^-]|--)+)-(?!-)', pool) # matches up to the first single hyphen
+ if not match: # unlikely, but... be clear if we assumed wrong
+ raise OpenShiftCheckException(
+ "This host's Docker reports it is using a storage pool named '{}'.\n"
+ "However this name does not have the expected format of 'vgname-lvname'\n"
+ "so the available storage in the VG cannot be determined.".format(pool)
+ )
+ vg_name = match.groups()[0].replace("--", "-")
+ vgs_cmd = "/sbin/vgs --noheadings -o vg_free --select vg_name=" + vg_name
+ # should return free space like " 12.00g" if the VG exists; empty if it does not
+
+ ret = self.execute_module("command", {"_raw_params": vgs_cmd}, task_vars)
+ if ret.get("failed") or ret.get("rc", 0) != 0:
+ raise OpenShiftCheckException(
+ "Is LVM installed? Failed to run /sbin/vgs "
+ "to determine docker storage usage:\n" + ret.get("msg", "")
+ )
+ size = ret.get("stdout", "").strip()
+ if not size:
+ raise OpenShiftCheckException(
+                "This host's Docker reports it is using a storage pool named '{pool}',\n"
+                "which we expect to come from local VG '{vg}'.\n"
+                "However, /sbin/vgs did not find this VG. Is Docker for this host\n"
+                "running and using the storage on the host?".format(pool=pool, vg=vg_name)
+ )
+ return size
+
+ @staticmethod
+ def _convert_to_bytes(string):
+ units = dict(
+ b=1,
+ k=1024,
+ m=1024**2,
+ g=1024**3,
+ t=1024**4,
+ p=1024**5,
+ )
+ string = string or ""
+ match = re.match(r'(\d+(?:\.\d+)?)\s*(\w)?', string) # float followed by optional unit
+ if not match:
+ raise ValueError("Cannot convert to a byte size: " + string)
+
+ number, unit = match.groups()
+ multiplier = 1 if not unit else units.get(unit.lower())
+ if not multiplier:
+ raise ValueError("Cannot convert to a byte size: " + string)
+
+ return float(number) * multiplier
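
Worked examples for the two parsing helpers above (pool and size strings hypothetical):

    import re

    # docker-storage-setup doubles inner hyphens, so 'vg--one-docker--pool' -> VG 'vg-one'
    match = re.match(r'((?:[^-]|--)+)-(?!-)', "vg--one-docker--pool")
    vg_name = match.groups()[0].replace("--", "-")   # -> "vg-one"

    DockerStorage._convert_to_bytes("12.00g")    # -> 12 * 1024**3
    DockerStorage._convert_to_bytes("96.45 MB")  # unit matched by first letter: 'M' -> 1024**2
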
diff --git a/roles/openshift_health_checker/openshift_checks/logging/__init__.py b/roles/openshift_health_checker/openshift_checks/logging/__init__.py
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/logging/__init__.py
diff --git a/roles/openshift_health_checker/openshift_checks/logging/curator.py b/roles/openshift_health_checker/openshift_checks/logging/curator.py
new file mode 100644
index 000000000..c9fc59896
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/logging/curator.py
@@ -0,0 +1,61 @@
+"""
+Module for performing checks on a Curator logging deployment
+"""
+
+from openshift_checks import get_var
+from openshift_checks.logging.logging import LoggingCheck
+
+
+class Curator(LoggingCheck):
+ """Module that checks an integrated logging Curator deployment"""
+
+ name = "curator"
+ tags = ["health", "logging"]
+
+ logging_namespace = None
+
+ def run(self, tmp, task_vars):
+ """Check various things and gather errors. Returns: result as hash"""
+
+ self.logging_namespace = get_var(task_vars, "openshift_logging_namespace", default="logging")
+ curator_pods, error = super(Curator, self).get_pods_for_component(
+ self.module_executor,
+ self.logging_namespace,
+ "curator",
+ task_vars
+ )
+ if error:
+ return {"failed": True, "changed": False, "msg": error}
+ check_error = self.check_curator(curator_pods)
+
+ if check_error:
+ msg = ("The following Curator deployment issue was found:"
+ "\n-------\n"
+ "{}".format(check_error))
+ return {"failed": True, "changed": False, "msg": msg}
+
+ # TODO(lmeyer): run it all again for the ops cluster
+ return {"failed": False, "changed": False, "msg": 'No problems found with Curator deployment.'}
+
+ def check_curator(self, pods):
+ """Check to see if curator is up and working. Returns: error string"""
+ if not pods:
+ return (
+ "There are no Curator pods for the logging stack,\n"
+ "so nothing will prune Elasticsearch indexes.\n"
+ "Is Curator correctly deployed?"
+ )
+
+ not_running = super(Curator, self).not_running_pods(pods)
+ if len(not_running) == len(pods):
+ return (
+ "The Curator pod is not currently in a running state,\n"
+ "so Elasticsearch indexes may increase without bound."
+ )
+ if len(pods) - len(not_running) > 1:
+ return (
+ "There is more than one Curator pod running. This should not normally happen.\n"
+ "Although this doesn't cause any problems, you may want to investigate."
+ )
+
+ return None
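
The branches of check_curator() above reduce to a small decision table:

    # no pods at all            -> "no Curator pods ... nothing will prune Elasticsearch indexes"
    # every pod not running     -> "not currently in a running state"
    # more than one pod running -> "more than one Curator pod running"
    # exactly one running pod   -> None (healthy)
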
diff --git a/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
new file mode 100644
index 000000000..01cb35b81
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/logging/elasticsearch.py
@@ -0,0 +1,217 @@
+"""
+Module for performing checks on an Elasticsearch logging deployment
+"""
+
+import json
+import re
+
+from openshift_checks import get_var
+from openshift_checks.logging.logging import LoggingCheck
+
+
+class Elasticsearch(LoggingCheck):
+ """Module that checks an integrated logging Elasticsearch deployment"""
+
+ name = "elasticsearch"
+ tags = ["health", "logging"]
+
+ logging_namespace = None
+
+ def run(self, tmp, task_vars):
+ """Check various things and gather errors. Returns: result as hash"""
+
+ self.logging_namespace = get_var(task_vars, "openshift_logging_namespace", default="logging")
+ es_pods, error = super(Elasticsearch, self).get_pods_for_component(
+ self.execute_module,
+ self.logging_namespace,
+ "es",
+ task_vars,
+ )
+ if error:
+ return {"failed": True, "changed": False, "msg": error}
+ check_error = self.check_elasticsearch(es_pods, task_vars)
+
+ if check_error:
+ msg = ("The following Elasticsearch deployment issue was found:"
+ "\n-------\n"
+ "{}".format(check_error))
+ return {"failed": True, "changed": False, "msg": msg}
+
+ # TODO(lmeyer): run it all again for the ops cluster
+ return {"failed": False, "changed": False, "msg": 'No problems found with Elasticsearch deployment.'}
+
+ def _not_running_elasticsearch_pods(self, es_pods):
+ """Returns: list of running pods, list of errors about non-running pods"""
+ not_running = super(Elasticsearch, self).not_running_pods(es_pods)
+ if not_running:
+ return not_running, [(
+ 'The following Elasticsearch pods are not running:\n'
+ '{pods}'
+ 'These pods will not aggregate logs from their nodes.'
+ ).format(pods=''.join(
+ " {} ({})\n".format(pod['metadata']['name'], pod['spec'].get('host', 'None'))
+ for pod in not_running
+ ))]
+ return not_running, []
+
+ def check_elasticsearch(self, es_pods, task_vars):
+ """Various checks for elasticsearch. Returns: error string"""
+ not_running_pods, error_msgs = self._not_running_elasticsearch_pods(es_pods)
+ running_pods = [pod for pod in es_pods if pod not in not_running_pods]
+ pods_by_name = {
+ pod['metadata']['name']: pod for pod in running_pods
+ # Filter out pods that are not members of a DC
+ if pod['metadata'].get('labels', {}).get('deploymentconfig')
+ }
+ if not pods_by_name:
+ return 'No logging Elasticsearch pods were found. Is logging deployed?'
+ error_msgs += self._check_elasticsearch_masters(pods_by_name, task_vars)
+ error_msgs += self._check_elasticsearch_node_list(pods_by_name, task_vars)
+ error_msgs += self._check_es_cluster_health(pods_by_name, task_vars)
+ error_msgs += self._check_elasticsearch_diskspace(pods_by_name, task_vars)
+ return '\n'.join(error_msgs)
+
+ @staticmethod
+ def _build_es_curl_cmd(pod_name, url):
+ base = "exec {name} -- curl -s --cert {base}cert --key {base}key --cacert {base}ca -XGET '{url}'"
+ return base.format(base="/etc/elasticsearch/secret/admin-", name=pod_name, url=url)
+
+ def _check_elasticsearch_masters(self, pods_by_name, task_vars):
+ """Check that Elasticsearch masters are sane. Returns: list of error strings"""
+ es_master_names = set()
+ error_msgs = []
+ for pod_name in pods_by_name.keys():
+            # Compare what each ES node reports as master, to detect split-brain
+ get_master_cmd = self._build_es_curl_cmd(pod_name, "https://localhost:9200/_cat/master")
+ master_name_str = self._exec_oc(get_master_cmd, [], task_vars)
+ master_names = (master_name_str or '').split(' ')
+ if len(master_names) > 1:
+ es_master_names.add(master_names[1])
+ else:
+ error_msgs.append(
+                    'No master? Elasticsearch {pod} returned a bad string when asked for its master name:\n'
+ ' {response}'.format(pod=pod_name, response=master_name_str)
+ )
+
+ if not es_master_names:
+ error_msgs.append('No logging Elasticsearch masters were found. Is logging deployed?')
+            return error_msgs
+
+ if len(es_master_names) > 1:
+ error_msgs.append(
+ 'Found multiple Elasticsearch masters according to the pods:\n'
+ '{master_list}\n'
+ 'This implies that the masters have "split brain" and are not correctly\n'
+ 'replicating data for the logging cluster. Log loss is likely to occur.'
+ .format(master_list='\n'.join(' ' + master for master in es_master_names))
+ )
+
+ return error_msgs
+
+ def _check_elasticsearch_node_list(self, pods_by_name, task_vars):
+ """Check that reported ES masters are accounted for by pods. Returns: list of error strings"""
+
+ if not pods_by_name:
+ return ['No logging Elasticsearch masters were found. Is logging deployed?']
+
+ # get ES cluster nodes
+ node_cmd = self._build_es_curl_cmd(list(pods_by_name.keys())[0], 'https://localhost:9200/_nodes')
+ cluster_node_data = self._exec_oc(node_cmd, [], task_vars)
+ try:
+ cluster_nodes = json.loads(cluster_node_data)['nodes']
+ except (ValueError, KeyError):
+ return [
+ 'Failed to query Elasticsearch for the list of ES nodes. The output was:\n' +
+ cluster_node_data
+ ]
+
+ # Try to match all ES-reported node hosts to known pods.
+ error_msgs = []
+ for node in cluster_nodes.values():
+ # Note that with 1.4/3.4 the pod IP may be used as the master name
+ if not any(node['host'] in (pod_name, pod['status'].get('podIP'))
+ for pod_name, pod in pods_by_name.items()):
+ error_msgs.append(
+ 'The Elasticsearch cluster reports a member node "{node}"\n'
+ 'that does not correspond to any known ES pod.'.format(node=node['host'])
+ )
+
+ return error_msgs
+
+ def _check_es_cluster_health(self, pods_by_name, task_vars):
+ """Exec into the elasticsearch pods and check the cluster health. Returns: list of errors"""
+ error_msgs = []
+ for pod_name in pods_by_name.keys():
+ cluster_health_cmd = self._build_es_curl_cmd(pod_name, 'https://localhost:9200/_cluster/health?pretty=true')
+ cluster_health_data = self._exec_oc(cluster_health_cmd, [], task_vars)
+ try:
+ health_res = json.loads(cluster_health_data)
+ if not health_res or not health_res.get('status'):
+ raise ValueError()
+ except ValueError:
+ error_msgs.append(
+ 'Could not retrieve cluster health status from logging ES pod "{pod}".\n'
+ 'Response was:\n{output}'.format(pod=pod_name, output=cluster_health_data)
+ )
+ continue
+
+ if health_res['status'] not in ['green', 'yellow']:
+ error_msgs.append(
+ 'Elasticsearch cluster health status is RED according to pod "{}"'.format(pod_name)
+ )
+
+ return error_msgs
+
+ def _check_elasticsearch_diskspace(self, pods_by_name, task_vars):
+ """
+ Exec into an ES pod and query the diskspace on the persistent volume.
+ Returns: list of errors
+ """
+ error_msgs = []
+ for pod_name in pods_by_name.keys():
+ df_cmd = 'exec {} -- df --output=ipcent,pcent /elasticsearch/persistent'.format(pod_name)
+ disk_output = self._exec_oc(df_cmd, [], task_vars)
+ lines = disk_output.splitlines()
+ # expecting one header looking like 'IUse% Use%' and one body line
+ body_re = r'\s*(\d+)%?\s+(\d+)%?\s*$'
+ if len(lines) != 2 or len(lines[0].split()) != 2 or not re.match(body_re, lines[1]):
+ error_msgs.append(
+ 'Could not retrieve storage usage from logging ES pod "{pod}".\n'
+ 'Response to `df` command was:\n{output}'.format(pod=pod_name, output=disk_output)
+ )
+ continue
+ inode_pct, disk_pct = re.match(body_re, lines[1]).groups()
+
+ inode_pct_thresh = get_var(task_vars, 'openshift_check_efk_es_inode_pct', default='90')
+ if int(inode_pct) >= int(inode_pct_thresh):
+ error_msgs.append(
+ 'Inode percent usage on the storage volume for logging ES pod "{pod}"\n'
+ ' is {pct}, greater than threshold {limit}.\n'
+ ' Note: threshold can be specified in inventory with {param}'.format(
+ pod=pod_name,
+ pct=str(inode_pct),
+ limit=str(inode_pct_thresh),
+ param='openshift_check_efk_es_inode_pct',
+ ))
+ disk_pct_thresh = get_var(task_vars, 'openshift_check_efk_es_storage_pct', default='80')
+ if int(disk_pct) >= int(disk_pct_thresh):
+ error_msgs.append(
+ 'Disk percent usage on the storage volume for logging ES pod "{pod}"\n'
+ ' is {pct}, greater than threshold {limit}.\n'
+ ' Note: threshold can be specified in inventory with {param}'.format(
+ pod=pod_name,
+ pct=str(disk_pct),
+ limit=str(disk_pct_thresh),
+ param='openshift_check_efk_es_storage_pct',
+ ))
+
+ return error_msgs
+
+ def _exec_oc(self, cmd_str, extra_args, task_vars):
+ return super(Elasticsearch, self).exec_oc(
+ self.execute_module,
+ self.logging_namespace,
+ cmd_str,
+ extra_args,
+ task_vars,
+ )
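
As a standalone sanity check of the `df` parsing in _check_elasticsearch_diskspace above, the header test and regex can be exercised directly; the sample output string here is hypothetical:

    import re

    body_re = r'\s*(\d+)%?\s+(\d+)%?\s*$'
    # hypothetical output of `df --output=ipcent,pcent /elasticsearch/persistent`
    disk_output = 'IUse% Use%\n    3%    4%\n'
    lines = disk_output.splitlines()
    assert len(lines) == 2 and len(lines[0].split()) == 2
    inode_pct, disk_pct = re.match(body_re, lines[1]).groups()
    print(inode_pct, disk_pct)  # prints: 3 4
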
diff --git a/roles/openshift_health_checker/openshift_checks/logging/fluentd.py b/roles/openshift_health_checker/openshift_checks/logging/fluentd.py
new file mode 100644
index 000000000..627567293
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/logging/fluentd.py
@@ -0,0 +1,170 @@
+"""
+Module for performing checks on a Fluentd logging deployment
+"""
+
+import json
+
+from openshift_checks import get_var
+from openshift_checks.logging.logging import LoggingCheck
+
+
+class Fluentd(LoggingCheck):
+ """Module that checks an integrated logging Fluentd deployment"""
+ name = "fluentd"
+ tags = ["health", "logging"]
+
+ logging_namespace = None
+
+ def run(self, tmp, task_vars):
+ """Check various things and gather errors. Returns: result as hash"""
+
+ self.logging_namespace = get_var(task_vars, "openshift_logging_namespace", default="logging")
+ fluentd_pods, error = super(Fluentd, self).get_pods_for_component(
+ self.execute_module,
+ self.logging_namespace,
+ "fluentd",
+ task_vars,
+ )
+ if error:
+ return {"failed": True, "changed": False, "msg": error}
+ check_error = self.check_fluentd(fluentd_pods, task_vars)
+
+ if check_error:
+ msg = ("The following Fluentd deployment issue was found:"
+ "\n-------\n"
+ "{}".format(check_error))
+ return {"failed": True, "changed": False, "msg": msg}
+
+ # TODO(lmeyer): run it all again for the ops cluster
+ return {"failed": False, "changed": False, "msg": 'No problems found with Fluentd deployment.'}
+
+ @staticmethod
+ def _filter_fluentd_labeled_nodes(nodes_by_name, node_selector):
+ """Filter to all nodes with fluentd label. Returns dict(name: node), error string"""
+ label, value = node_selector.split('=', 1)
+ fluentd_nodes = {
+ name: node for name, node in nodes_by_name.items()
+ if node['metadata']['labels'].get(label) == value
+ }
+ if not fluentd_nodes:
+ return None, (
+ 'There are no nodes with the fluentd label {label}.\n'
+ 'This means no logs will be aggregated from the nodes.'
+ ).format(label=node_selector)
+ return fluentd_nodes, None
+
+ @staticmethod
+ def _check_node_labeling(nodes_by_name, fluentd_nodes, node_selector, task_vars):
+ """Note if nodes are not labeled as expected. Returns: error string"""
+ intended_nodes = get_var(task_vars, 'openshift_logging_fluentd_hosts', default=['--all'])
+ if not intended_nodes or '--all' in intended_nodes:
+ intended_nodes = nodes_by_name.keys()
+ nodes_missing_labels = set(intended_nodes) - set(fluentd_nodes.keys())
+ if nodes_missing_labels:
+ return (
+ 'The following nodes are supposed to be labeled with {label} but are not:\n'
+ ' {nodes}\n'
+ 'Fluentd will not aggregate logs from these nodes.'
+ ).format(label=node_selector, nodes=', '.join(nodes_missing_labels))
+ return None
+
+ @staticmethod
+ def _check_nodes_have_fluentd(pods, fluentd_nodes):
+ """Make sure fluentd is on all the labeled nodes. Returns: error string"""
+ unmatched_nodes = fluentd_nodes.copy()
+ node_names_by_label = {
+ node['metadata']['labels']['kubernetes.io/hostname']: name
+ for name, node in fluentd_nodes.items()
+ }
+ node_names_by_internal_ip = {
+ address['address']: name
+ for name, node in fluentd_nodes.items()
+ for address in node['status']['addresses']
+ if address['type'] == "InternalIP"
+ }
+ for pod in pods:
+ for name in [
+ pod['spec']['nodeName'],
+ node_names_by_internal_ip.get(pod['spec']['nodeName']),
+ node_names_by_label.get(pod.get('spec', {}).get('host')),
+ ]:
+ unmatched_nodes.pop(name, None)
+ if unmatched_nodes:
+ return (
+ 'The following nodes are supposed to have a Fluentd pod but do not:\n'
+ '{nodes}'
+ 'These nodes will not have their logs aggregated.'
+ ).format(nodes=''.join(
+ " {}\n".format(name)
+ for name in unmatched_nodes.keys()
+ ))
+ return None
+
+ def _check_fluentd_pods_running(self, pods):
+ """Make sure all fluentd pods are running. Returns: error string"""
+ not_running = super(Fluentd, self).not_running_pods(pods)
+ if not_running:
+ return (
+ 'The following Fluentd pods are supposed to be running but are not:\n'
+ '{pods}'
+ 'These pods will not aggregate logs from their nodes.'
+ ).format(pods=''.join(
+ " {} ({})\n".format(pod['metadata']['name'], pod['spec'].get('host', 'None'))
+ for pod in not_running
+ ))
+ return None
+
+ def check_fluentd(self, pods, task_vars):
+ """Verify fluentd is running everywhere. Returns: error string"""
+
+ node_selector = get_var(task_vars, 'openshift_logging_fluentd_nodeselector',
+ default='logging-infra-fluentd=true')
+
+ nodes_by_name, error = self.get_nodes_by_name(task_vars)
+
+ if error:
+ return error
+ fluentd_nodes, error = self._filter_fluentd_labeled_nodes(nodes_by_name, node_selector)
+ if error:
+ return error
+
+ error_msgs = []
+ error = self._check_node_labeling(nodes_by_name, fluentd_nodes, node_selector, task_vars)
+ if error:
+ error_msgs.append(error)
+ error = self._check_nodes_have_fluentd(pods, fluentd_nodes)
+ if error:
+ error_msgs.append(error)
+ error = self._check_fluentd_pods_running(pods)
+ if error:
+ error_msgs.append(error)
+
+ # Make sure there are no extra fluentd pods
+ if len(pods) > len(fluentd_nodes):
+ error_msgs.append(
+ 'There are more Fluentd pods running than nodes labeled.\n'
+ 'This may not cause problems with logging but it likely indicates something wrong.'
+ )
+
+ return '\n'.join(error_msgs)
+
+ def get_nodes_by_name(self, task_vars):
+ """Retrieve all the node definitions. Returns: dict(name: node), error string"""
+ nodes_json = self._exec_oc("get nodes -o json", [], task_vars)
+ try:
+ nodes = json.loads(nodes_json)
+ except ValueError: # no valid json - should not happen
+ return None, "Could not obtain a list of nodes to validate fluentd. Output from oc get:\n" + nodes_json
+ if not nodes or not nodes.get('items'): # also should not happen
+ return None, "No nodes appear to be defined according to the API."
+ return {
+ node['metadata']['name']: node
+ for node in nodes['items']
+ }, None
+
+ def _exec_oc(self, cmd_str, extra_args, task_vars):
+ return super(Fluentd, self).exec_oc(self.execute_module,
+ self.logging_namespace,
+ cmd_str,
+ extra_args,
+ task_vars)
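
A minimal sketch of the label filtering done by _filter_fluentd_labeled_nodes, using made-up node definitions:

    node_selector = 'logging-infra-fluentd=true'
    label, value = node_selector.split('=', 1)

    nodes_by_name = {
        'node1': {'metadata': {'labels': {'logging-infra-fluentd': 'true'}}},
        'node2': {'metadata': {'labels': {}}},  # unlabeled, so filtered out
    }
    fluentd_nodes = {
        name: node for name, node in nodes_by_name.items()
        if node['metadata']['labels'].get(label) == value
    }
    print(sorted(fluentd_nodes))  # prints: ['node1']
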
diff --git a/roles/openshift_health_checker/openshift_checks/logging/kibana.py b/roles/openshift_health_checker/openshift_checks/logging/kibana.py
new file mode 100644
index 000000000..442f407b1
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/logging/kibana.py
@@ -0,0 +1,229 @@
+"""
+Module for performing checks on a Kibana logging deployment
+"""
+
+import json
+import ssl
+
+try:
+ from urllib2 import HTTPError, URLError
+ import urllib2
+except ImportError:
+ from urllib.error import HTTPError, URLError
+ import urllib.request as urllib2
+
+from openshift_checks import get_var
+from openshift_checks.logging.logging import LoggingCheck
+
+
+class Kibana(LoggingCheck):
+ """Module that checks an integrated logging Kibana deployment"""
+
+ name = "kibana"
+ tags = ["health", "logging"]
+
+ logging_namespace = None
+
+ def run(self, tmp, task_vars):
+ """Check various things and gather errors. Returns: result as hash"""
+
+ self.logging_namespace = get_var(task_vars, "openshift_logging_namespace", default="logging")
+ kibana_pods, error = super(Kibana, self).get_pods_for_component(
+ self.execute_module,
+ self.logging_namespace,
+ "kibana",
+ task_vars,
+ )
+ if error:
+ return {"failed": True, "changed": False, "msg": error}
+ check_error = self.check_kibana(kibana_pods)
+
+ if not check_error:
+ check_error = self._check_kibana_route(task_vars)
+
+ if check_error:
+ msg = ("The following Kibana deployment issue was found:"
+ "\n-------\n"
+ "{}".format(check_error))
+ return {"failed": True, "changed": False, "msg": msg}
+
+ # TODO(lmeyer): run it all again for the ops cluster
+ return {"failed": False, "changed": False, "msg": 'No problems found with Kibana deployment.'}
+
+ def _verify_url_internal(self, url, task_vars):
+ """
+ Try to reach a URL from the host.
+ Returns: None on success, or an error string describing the failure
+ """
+ args = dict(
+ url=url,
+ follow_redirects='none',
+ validate_certs='no', # likely to be signed with internal CA
+ # TODO(lmeyer): give users option to validate certs
+ status_code=302,
+ )
+ result = self.execute_module('uri', args, task_vars)
+ if result.get('failed'):
+ return result['msg']
+ return None
+
+ @staticmethod
+ def _verify_url_external(url):
+ """
+ Try to reach a URL from the Ansible control host.
+ Returns: None on success, or an error string describing the failure
+ """
+ # This actually checks from the ansible control host, which may or may not
+ # really be "external" to the cluster.
+
+ # Disable SSL cert validation to work around internally signed certs
+ ctx = ssl.create_default_context()
+ ctx.check_hostname = False # must be disabled first, or setting verify_mode to CERT_NONE is refused
+ ctx.verify_mode = ssl.CERT_NONE
+
+ # Verify that the url is returning a valid response
+ try:
+ # We only care if the url connects and responds
+ return_code = urllib2.urlopen(url, context=ctx).getcode()
+ except HTTPError as httperr:
+ return httperr.reason
+ except URLError as urlerr:
+ return str(urlerr)
+
+ # there appears to be no way to prevent urlopen from following redirects
+ if return_code != 200:
+ return 'Expected success (200) but got return code {}'.format(int(return_code))
+
+ return None
+
+ def check_kibana(self, pods):
+ """Check to see if Kibana is up and working. Returns: error string."""
+
+ if not pods:
+ return "There are no Kibana pods deployed, so no access to the logging UI."
+
+ not_running = self.not_running_pods(pods)
+ if len(not_running) == len(pods):
+ return "No Kibana pod is in a running state, so there is no access to the logging UI."
+ elif not_running:
+ return (
+ "The following Kibana pods are not currently in a running state:\n"
+ "{pods}"
+ "However at least one is, so service may not be impacted."
+ ).format(pods="".join(" " + pod['metadata']['name'] + "\n" for pod in not_running))
+
+ return None
+
+ def _get_kibana_url(self, task_vars):
+ """
+ Get kibana route or report error.
+ Returns: url (or empty), reason for failure
+ """
+
+ # Get logging url
+ get_route = self._exec_oc("get route logging-kibana -o json", [], task_vars)
+ if not get_route:
+ return None, 'no_route_exists'
+
+ route = json.loads(get_route)
+
+ # check that the route has been accepted by a router
+ ingress = route["status"]["ingress"]
+ # ingress can be null if there is no router, or empty if not routed
+ if not ingress or not ingress[0]:
+ return None, 'route_not_accepted'
+
+ host = route.get("spec", {}).get("host")
+ if not host:
+ return None, 'route_missing_host'
+
+ return 'https://{}/'.format(host), None
+
+ def _check_kibana_route(self, task_vars):
+ """
+ Check to see if kibana route is up and working.
+ Returns: error string
+ """
+ known_errors = dict(
+ no_route_exists=(
+ 'No route is defined for Kibana in the logging namespace,\n'
+ 'so the logging stack is not accessible. Is logging deployed?\n'
+ 'Did something remove the logging-kibana route?'
+ ),
+ route_not_accepted=(
+ 'The logging-kibana route is not being routed by any router.\n'
+ 'Is the router deployed and working?'
+ ),
+ route_missing_host=(
+ 'The logging-kibana route has no hostname defined,\n'
+ 'which should never happen. Did something alter its definition?'
+ ),
+ )
+
+ kibana_url, error = self._get_kibana_url(task_vars)
+ if not kibana_url:
+ return known_errors.get(error, error)
+
+ # first, check that kibana is reachable from the master.
+ error = self._verify_url_internal(kibana_url, task_vars)
+ if error:
+ if 'urlopen error [Errno 111] Connection refused' in error:
+ error = (
+ 'Failed to connect from this master to Kibana URL {url}\n'
+ 'Is kibana running, and is at least one router routing to it?'
+ ).format(url=kibana_url)
+ elif 'urlopen error [Errno -2] Name or service not known' in error:
+ error = (
+ 'Failed to connect from this master to Kibana URL {url}\n'
+ 'because the hostname does not resolve.\n'
+ 'Is DNS configured for the Kibana hostname?'
+ ).format(url=kibana_url)
+ elif 'Status code was not' in error:
+ error = (
+ 'A request from this master to the Kibana URL {url}\n'
+ 'did not return the correct status code (302).\n'
+ 'This could mean that Kibana is malfunctioning, the hostname is\n'
+ 'resolving incorrectly, or other network issues. The output was:\n'
+ ' {error}'
+ ).format(url=kibana_url, error=error)
+ return 'Error validating the logging Kibana route:\n' + error
+
+ # in production we would like the kibana route to work from outside the
+ # cluster too; but that may not be the case, so allow disabling just this part.
+ if not get_var(task_vars, "openshift_check_efk_kibana_external", default=True):
+ return None
+ error = self._verify_url_external(kibana_url)
+ if error:
+ if 'urlopen error [Errno 111] Connection refused' in error:
+ error = (
+ 'Failed to connect from the Ansible control host to Kibana URL {url}\n'
+ 'Is the router for the Kibana hostname exposed externally?'
+ ).format(url=kibana_url)
+ elif 'urlopen error [Errno -2] Name or service not known' in error:
+ error = (
+ 'Failed to resolve the Kibana hostname in {url}\n'
+ 'from the Ansible control host.\n'
+ 'Is DNS configured to resolve this Kibana hostname externally?'
+ ).format(url=kibana_url)
+ elif 'Expected success (200)' in error:
+ error = (
+ 'A request to Kibana at {url}\n'
+ 'returned the wrong status code:\n'
+ ' {error}\n'
+ 'This could mean that Kibana is malfunctioning, the hostname is\n'
+ 'resolving incorrectly, or other network issues.'
+ ).format(url=kibana_url, error=error)
+ error = (
+ 'Error validating the logging Kibana route:\n{error}\n'
+ 'To disable external Kibana route validation, set in your inventory:\n'
+ ' openshift_check_efk_kibana_external=False'
+ ).format(error=error)
+ return error
+ return None
+
+ def _exec_oc(self, cmd_str, extra_args, task_vars):
+ return super(Kibana, self).exec_oc(self.execute_module,
+ self.logging_namespace,
+ cmd_str,
+ extra_args,
+ task_vars)
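
The external reachability probe above can be reduced to a standalone sketch (Python 3 spelling only; the URL is a placeholder, not a real route):

    import ssl
    from urllib.error import HTTPError, URLError
    from urllib.request import urlopen

    def probe(url):
        """Return None if url answers 200, else a failure reason."""
        ctx = ssl.create_default_context()
        ctx.check_hostname = False  # must be disabled before CERT_NONE
        ctx.verify_mode = ssl.CERT_NONE  # tolerate internally signed certs
        try:
            return_code = urlopen(url, context=ctx).getcode()
        except HTTPError as err:
            return err.reason
        except URLError as err:
            return str(err)
        if return_code != 200:
            return 'Expected success (200) but got return code {}'.format(return_code)
        return None

    print(probe('https://kibana.example.com/'))  # hypothetical Kibana route
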
diff --git a/roles/openshift_health_checker/openshift_checks/logging/logging.py b/roles/openshift_health_checker/openshift_checks/logging/logging.py
new file mode 100644
index 000000000..05b4d300c
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/logging/logging.py
@@ -0,0 +1,96 @@
+"""
+Util functions for performing checks on an Elasticsearch, Fluentd, and Kibana stack
+"""
+
+import json
+import os
+
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var
+
+
+class LoggingCheck(OpenShiftCheck):
+ """Base class for logging component checks"""
+
+ name = "logging"
+
+ @classmethod
+ def is_active(cls, task_vars):
+ return super(LoggingCheck, cls).is_active(task_vars) and cls.is_first_master(task_vars)
+
+ @staticmethod
+ def is_first_master(task_vars):
+ """Run only on first master and only when logging is configured. Returns: bool"""
+ logging_deployed = get_var(task_vars, "openshift_hosted_logging_deploy", default=True)
+ # Note: It would be nice to use membership in oo_first_master group, however for now it
+ # seems best to avoid requiring that setup and just check this is the first master.
+ hostname = get_var(task_vars, "ansible_ssh_host", default=None)
+ masters = get_var(task_vars, "groups", "masters", default=None) or [None]
+ return logging_deployed and masters[0] == hostname
+
+ def run(self, tmp, task_vars):
+ pass
+
+ def get_pods_for_component(self, execute_module, namespace, logging_component, task_vars):
+ """Get all pods for a given component. Returns: list of pods for component, error string"""
+ pod_output = self.exec_oc(
+ execute_module,
+ namespace,
+ "get pods -l component={} -o json".format(logging_component),
+ [],
+ task_vars
+ )
+ try:
+ pods = json.loads(pod_output)
+ if not pods or not pods.get('items'):
+ raise ValueError()
+ except ValueError:
+ # a successful run whose output fails to parse generally means there were no pods in the namespace
+ return None, 'There are no pods in the {} namespace. Is logging deployed?'.format(namespace)
+
+ return pods['items'], None
+
+ @staticmethod
+ def not_running_pods(pods):
+ """Returns: list of pods not in a ready and running state"""
+ return [
+ pod for pod in pods
+ if any(
+ container['ready'] is False
+ for container in pod['status']['containerStatuses']
+ ) or not any(
+ condition['type'] == 'Ready' and condition['status'] == 'True'
+ for condition in pod['status']['conditions']
+ )
+ ]
+
+ @staticmethod
+ def exec_oc(execute_module=None, namespace="logging", cmd_str="", extra_args=None, task_vars=None):
+ """
+ Execute an 'oc' command on the remote host, in the given namespace.
+ Returns: output of the command,
+ or raises OpenShiftCheckException on error
+ """
+ config_base = get_var(task_vars, "openshift", "common", "config_base")
+ args = {
+ "namespace": namespace,
+ "config_file": os.path.join(config_base, "master", "admin.kubeconfig"),
+ "cmd": cmd_str,
+ "extra_args": list(extra_args) if extra_args else [],
+ }
+
+ result = execute_module("ocutil", args, task_vars)
+ if result.get("failed"):
+ msg = (
+ 'Unexpected error using `oc` to validate the logging stack components.\n'
+ 'Error executing `oc {cmd}`:\n'
+ '{error}'
+ ).format(cmd=args['cmd'], error=result['result'])
+
+ if result['result'] == '[Errno 2] No such file or directory':
+ msg = (
+ "This host is supposed to be a master but does not have the `oc` command where expected.\n"
+ "Has an installation been run on this host yet?"
+ )
+ raise OpenShiftCheckException(msg)
+
+ return result.get("result", "")
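
The readiness test in not_running_pods can be illustrated with two made-up pod records; only the second one is flagged:

    pods = [
        {'status': {'containerStatuses': [{'ready': True}],
                    'conditions': [{'type': 'Ready', 'status': 'True'}]}},
        {'status': {'containerStatuses': [{'ready': False}],
                    'conditions': [{'type': 'Ready', 'status': 'False'}]}},
    ]
    not_running = [
        pod for pod in pods
        if any(container['ready'] is False
               for container in pod['status']['containerStatuses'])
        or not any(cond['type'] == 'Ready' and cond['status'] == 'True'
                   for cond in pod['status']['conditions'])
    ]
    print(len(not_running))  # prints: 1
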
diff --git a/roles/openshift_health_checker/openshift_checks/memory_availability.py b/roles/openshift_health_checker/openshift_checks/memory_availability.py
index 28805dc37..f4e31065f 100644
--- a/roles/openshift_health_checker/openshift_checks/memory_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/memory_availability.py
@@ -1,6 +1,9 @@
# pylint: disable=missing-docstring
from openshift_checks import OpenShiftCheck, get_var
+MIB = 2**20
+GIB = 2**30
+
class MemoryAvailability(OpenShiftCheck):
"""Check that recommended memory is available."""
@@ -11,10 +14,12 @@ class MemoryAvailability(OpenShiftCheck):
# Values taken from the official installation documentation:
# https://docs.openshift.org/latest/install_config/install/prerequisites.html#system-requirements
recommended_memory_bytes = {
- "masters": 16 * 10**9,
- "nodes": 8 * 10**9,
- "etcd": 20 * 10**9,
+ "masters": 16 * GIB,
+ "nodes": 8 * GIB,
+ "etcd": 8 * GIB,
}
+ # https://access.redhat.com/solutions/3006511 physical RAM is partly reserved from memtotal
+ memtotal_adjustment = 1 * GIB
@classmethod
def is_active(cls, task_vars):
@@ -25,19 +30,21 @@ class MemoryAvailability(OpenShiftCheck):
def run(self, tmp, task_vars):
group_names = get_var(task_vars, "group_names")
- total_memory_bytes = get_var(task_vars, "ansible_memtotal_mb") * 10**6
+ total_memory_bytes = get_var(task_vars, "ansible_memtotal_mb") * MIB
- min_memory_bytes = max(self.recommended_memory_bytes.get(name, 0) for name in group_names)
+ recommended_min = max(self.recommended_memory_bytes.get(name, 0) for name in group_names)
+ configured_min = float(get_var(task_vars, "openshift_check_min_host_memory_gb", default=0)) * GIB
+ min_memory_bytes = configured_min or recommended_min
- if total_memory_bytes < min_memory_bytes:
+ if total_memory_bytes + self.memtotal_adjustment < min_memory_bytes:
return {
'failed': True,
'msg': (
- 'Available memory ({available:.1f} GB) '
- 'below recommended value ({recommended:.1f} GB)'
+ 'Available memory ({available:.1f} GiB) is too far '
+ 'below recommended value ({recommended:.1f} GiB)'
).format(
- available=float(total_memory_bytes) / 10**9,
- recommended=float(min_memory_bytes) / 10**9,
+ available=float(total_memory_bytes) / GIB,
+ recommended=float(min_memory_bytes) / GIB,
),
}
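
A worked example of why the MiB arithmetic and the 1 GiB memtotal_adjustment matter (the memtotal value is hypothetical but typical for a nominal 8 GiB VM):

    MIB = 2**20
    GIB = 2**30
    ansible_memtotal_mb = 7827  # physical RAM minus kernel-reserved memory
    total_memory_bytes = ansible_memtotal_mb * MIB  # ~7.64 GiB
    min_memory_bytes = 8 * GIB  # "nodes" recommendation
    # Without the adjustment the host would fail (7.64 GiB < 8 GiB);
    # with it, the reserved-RAM slack is tolerated.
    print(total_memory_bytes < min_memory_bytes)            # True
    print(total_memory_bytes + 1 * GIB < min_memory_bytes)  # False -> check passes
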
diff --git a/roles/openshift_health_checker/openshift_checks/mixins.py b/roles/openshift_health_checker/openshift_checks/mixins.py
index 20d160eaf..7f3d78cc4 100644
--- a/roles/openshift_health_checker/openshift_checks/mixins.py
+++ b/roles/openshift_health_checker/openshift_checks/mixins.py
@@ -1,4 +1,3 @@
-# pylint: disable=missing-docstring,too-few-public-methods
"""
Mixin classes meant to be used with subclasses of OpenShiftCheck.
"""
@@ -8,8 +7,49 @@ from openshift_checks import get_var
class NotContainerizedMixin(object):
"""Mixin for checks that are only active when not in containerized mode."""
+ # permanent # pylint: disable=too-few-public-methods
+ # Reason: The mixin is not intended to stand on its own as a class.
@classmethod
def is_active(cls, task_vars):
+ """Only run on non-containerized hosts."""
is_containerized = get_var(task_vars, "openshift", "common", "is_containerized")
return super(NotContainerizedMixin, cls).is_active(task_vars) and not is_containerized
+
+
+class DockerHostMixin(object):
+ """Mixin for checks that are only active on hosts that require Docker."""
+
+ dependencies = []
+
+ @classmethod
+ def is_active(cls, task_vars):
+ """Only run on hosts that depend on Docker."""
+ is_containerized = get_var(task_vars, "openshift", "common", "is_containerized")
+ is_node = "nodes" in get_var(task_vars, "group_names", default=[])
+ return super(DockerHostMixin, cls).is_active(task_vars) and (is_containerized or is_node)
+
+ def ensure_dependencies(self, task_vars):
+ """
+ Ensure that docker-related packages exist, but not on atomic hosts
+ (which would not be able to install but should already have them).
+ Returns: msg, failed, changed
+ """
+ if get_var(task_vars, "openshift", "common", "is_atomic"):
+ return "", False, False
+
+ # NOTE: we would use the "package" module but it's actually an action plugin
+ # and it's not clear how to invoke one of those. This is about the same anyway:
+ pkg_manager = get_var(task_vars, "ansible_pkg_mgr", default="yum")
+ result = self.module_executor(pkg_manager, {"name": self.dependencies, "state": "present"}, task_vars)
+ msg = result.get("msg", "")
+ if result.get("failed"):
+ if "No package matching" in msg:
+ msg = "Ensure that all required dependencies can be installed via `yum`.\n"
+ msg = (
+ "Unable to install required packages on this host:\n"
+ " {deps}\n{msg}"
+ ).format(deps=',\n '.join(self.dependencies), msg=msg)
+ failed = result.get("failed", False) or result.get("rc", 0) != 0
+ changed = result.get("changed", False)
+ return msg, failed, changed
diff --git a/roles/openshift_health_checker/openshift_checks/ovs_version.py b/roles/openshift_health_checker/openshift_checks/ovs_version.py
new file mode 100644
index 000000000..1e45ae3af
--- /dev/null
+++ b/roles/openshift_health_checker/openshift_checks/ovs_version.py
@@ -0,0 +1,78 @@
+"""
+Ansible module for determining if an installed version of Open vSwitch is incompatible with the
+currently installed version of OpenShift.
+"""
+
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var
+from openshift_checks.mixins import NotContainerizedMixin
+
+
+class OvsVersion(NotContainerizedMixin, OpenShiftCheck):
+ """Check that packages in a package_list are installed on the host
+ and are the correct version as determined by an OpenShift installation.
+ """
+
+ name = "ovs_version"
+ tags = ["health"]
+
+ openshift_to_ovs_version = {
+ "3.6": "2.6",
+ "3.5": "2.6",
+ "3.4": "2.4",
+ }
+
+ # map major release versions across releases
+ # to a common major version
+ openshift_major_release_version = {
+ "1": "3",
+ }
+
+ @classmethod
+ def is_active(cls, task_vars):
+ """Skip hosts that do not have package requirements."""
+ group_names = get_var(task_vars, "group_names", default=[])
+ master_or_node = 'masters' in group_names or 'nodes' in group_names
+ return super(OvsVersion, cls).is_active(task_vars) and master_or_node
+
+ def run(self, tmp, task_vars):
+ args = {
+ "package_list": [
+ {
+ "name": "openvswitch",
+ "version": self.get_required_ovs_version(task_vars),
+ },
+ ],
+ }
+ return self.execute_module("rpm_version", args, task_vars)
+
+ def get_required_ovs_version(self, task_vars):
+ """Return the correct Open vSwitch version for the current OpenShift version"""
+ openshift_version = self._get_openshift_version(task_vars)
+
+ if float(openshift_version) < 3.5:
+ return self.openshift_to_ovs_version["3.4"]
+
+ ovs_version = self.openshift_to_ovs_version.get(str(openshift_version))
+ if ovs_version:
+ return ovs_version
+
+ msg = "There is no recommended version of Open vSwitch for the current version of OpenShift: {}"
+ raise OpenShiftCheckException(msg.format(openshift_version))
+
+ def _get_openshift_version(self, task_vars):
+ openshift_version = get_var(task_vars, "openshift_image_tag")
+ if openshift_version and openshift_version[0] == 'v':
+ openshift_version = openshift_version[1:]
+
+ return self._parse_version(openshift_version)
+
+ def _parse_version(self, version):
+ components = version.split(".")
+ if not components or len(components) < 2:
+ msg = "An invalid version of OpenShift was found for this host: {}"
+ raise OpenShiftCheckException(msg.format(version))
+
+ if components[0] in self.openshift_major_release_version:
+ components[0] = self.openshift_major_release_version[components[0]]
+
+ return '.'.join(components[:2])
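
A short sketch of the version normalization in _parse_version, with illustrative inputs (origin 1.x maps onto the 3.x series):

    openshift_major_release_version = {"1": "3"}

    def parse_version(version):
        components = version.split(".")
        if len(components) < 2:
            raise ValueError("invalid version: " + version)
        if components[0] in openshift_major_release_version:
            components[0] = openshift_major_release_version[components[0]]
        return '.'.join(components[:2])

    print(parse_version("v3.6.173"[1:]))  # prints: 3.6
    print(parse_version("1.5.1"))         # prints: 3.5
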
diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py
index 682f6bd40..2e737818b 100644
--- a/roles/openshift_health_checker/openshift_checks/package_version.py
+++ b/roles/openshift_health_checker/openshift_checks/package_version.py
@@ -1,5 +1,5 @@
# pylint: disable=missing-docstring
-from openshift_checks import OpenShiftCheck, get_var
+from openshift_checks import OpenShiftCheck, OpenShiftCheckException, get_var
from openshift_checks.mixins import NotContainerizedMixin
@@ -9,6 +9,25 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
name = "package_version"
tags = ["preflight"]
+ openshift_to_ovs_version = {
+ "3.6": "2.6",
+ "3.5": "2.6",
+ "3.4": "2.4",
+ }
+
+ openshift_to_docker_version = {
+ "3.1": "1.8",
+ "3.2": "1.10",
+ "3.3": "1.10",
+ "3.4": "1.12",
+ }
+
+ # map major release versions across releases
+ # to a common major version
+ openshift_major_release_version = {
+ "1": "3",
+ }
+
@classmethod
def is_active(cls, task_vars):
"""Skip hosts that do not have package requirements."""
@@ -17,9 +36,90 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
return super(PackageVersion, cls).is_active(task_vars) and master_or_node
def run(self, tmp, task_vars):
+ rpm_prefix = get_var(task_vars, "openshift", "common", "service_type")
+ openshift_release = get_var(task_vars, "openshift_release", default='')
+ deployment_type = get_var(task_vars, "openshift_deployment_type")
+ check_multi_minor_release = deployment_type in ['openshift-enterprise']
+
args = {
- "requested_openshift_release": get_var(task_vars, "openshift_release", default=''),
- "openshift_deployment_type": get_var(task_vars, "openshift_deployment_type"),
- "rpm_prefix": get_var(task_vars, "openshift", "common", "service_type"),
+ "package_list": [
+ {
+ "name": "openvswitch",
+ "version": self.get_required_ovs_version(task_vars),
+ "check_multi": False,
+ },
+ {
+ "name": "docker",
+ "version": self.get_required_docker_version(task_vars),
+ "check_multi": False,
+ },
+ {
+ "name": "{}".format(rpm_prefix),
+ "version": openshift_release,
+ "check_multi": check_multi_minor_release,
+ },
+ {
+ "name": "{}-master".format(rpm_prefix),
+ "version": openshift_release,
+ "check_multi": check_multi_minor_release,
+ },
+ {
+ "name": "{}-node".format(rpm_prefix),
+ "version": openshift_release,
+ "check_multi": check_multi_minor_release,
+ },
+ ],
}
+
return self.execute_module("aos_version", args, tmp, task_vars)
+
+ def get_required_ovs_version(self, task_vars):
+ """Return the correct Open vSwitch version for the current OpenShift version.
+ If the current OpenShift version is >= 3.5, ensure Open vSwitch version 2.6,
+ Else ensure Open vSwitch version 2.4"""
+ openshift_version = self.get_openshift_version(task_vars)
+
+ if float(openshift_version) < 3.5:
+ return self.openshift_to_ovs_version["3.4"]
+
+ ovs_version = self.openshift_to_ovs_version.get(str(openshift_version))
+ if ovs_version:
+ return ovs_version
+
+ msg = "There is no recommended version of Open vSwitch for the current version of OpenShift: {}"
+ raise OpenShiftCheckException(msg.format(openshift_version))
+
+ def get_required_docker_version(self, task_vars):
+ """Return the correct Docker version for the current OpenShift version.
+ If the OpenShift version is 3.1, ensure Docker version 1.8.
+ If the OpenShift version is 3.2 or 3.3, ensure Docker version 1.10.
+ If the current OpenShift version is >= 3.4, ensure Docker version 1.12."""
+ openshift_version = self.get_openshift_version(task_vars)
+
+ if float(openshift_version) >= 3.4:
+ return self.openshift_to_docker_version["3.4"]
+
+ docker_version = self.openshift_to_docker_version.get(str(openshift_version))
+ if docker_version:
+ return docker_version
+
+ msg = "There is no recommended version of Docker for the current version of OpenShift: {}"
+ raise OpenShiftCheckException(msg.format(openshift_version))
+
+ def get_openshift_version(self, task_vars):
+ openshift_version = get_var(task_vars, "openshift_image_tag")
+ if openshift_version and openshift_version[0] == 'v':
+ openshift_version = openshift_version[1:]
+
+ return self.parse_version(openshift_version)
+
+ def parse_version(self, version):
+ components = version.split(".")
+ if not components or len(components) < 2:
+ msg = "An invalid version of OpenShift was found for this host: {}"
+ raise OpenShiftCheckException(msg.format(version))
+
+ if components[0] in self.openshift_major_release_version:
+ components[0] = self.openshift_major_release_version[components[0]]
+
+ return '.'.join(components[:2])
diff --git a/roles/openshift_health_checker/test/action_plugin_test.py b/roles/openshift_health_checker/test/action_plugin_test.py
index 2693ae37b..6ebf0ebb2 100644
--- a/roles/openshift_health_checker/test/action_plugin_test.py
+++ b/roles/openshift_health_checker/test/action_plugin_test.py
@@ -67,6 +67,7 @@ def changed(result):
return result.get('changed', False)
+# tests whether task is skipped, not individual checks
def skipped(result):
return result.get('skipped', False)
@@ -101,7 +102,20 @@ def test_action_plugin_skip_non_active_checks(plugin, task_vars, monkeypatch):
result = plugin.run(tmp=None, task_vars=task_vars)
- assert result['checks']['fake_check'] == {'skipped': True}
+ assert result['checks']['fake_check'] == dict(skipped=True, skipped_reason="Not active for this host")
+ assert not failed(result)
+ assert not changed(result)
+ assert not skipped(result)
+
+
+def test_action_plugin_skip_disabled_checks(plugin, task_vars, monkeypatch):
+ checks = [fake_check('fake_check', is_active=True)]
+ monkeypatch.setattr('openshift_checks.OpenShiftCheck.subclasses', classmethod(lambda cls: checks))
+
+ task_vars['openshift_disable_check'] = 'fake_check'
+ result = plugin.run(tmp=None, task_vars=task_vars)
+
+ assert result['checks']['fake_check'] == dict(skipped=True, skipped_reason="Disabled by user request")
assert not failed(result)
assert not changed(result)
assert not skipped(result)
diff --git a/roles/openshift_health_checker/test/aos_version_test.py b/roles/openshift_health_checker/test/aos_version_test.py
index 39c86067a..697805dd2 100644
--- a/roles/openshift_health_checker/test/aos_version_test.py
+++ b/roles/openshift_health_checker/test/aos_version_test.py
@@ -4,89 +4,118 @@ import aos_version
from collections import namedtuple
Package = namedtuple('Package', ['name', 'version'])
-expected_pkgs = set(['spam', 'eggs'])
+expected_pkgs = {
+ "spam": {
+ "name": "spam",
+ "version": "3.2.1",
+ "check_multi": False,
+ },
+ "eggs": {
+ "name": "eggs",
+ "version": "3.2.1",
+ "check_multi": False,
+ },
+}
-@pytest.mark.parametrize('pkgs, requested_release, expect_not_found', [
+@pytest.mark.parametrize('pkgs, expect_not_found', [
(
[],
- '3.2.1',
- expected_pkgs, # none found
+ {
+ "spam": {
+ "name": "spam",
+ "version": "3.2.1",
+ "check_multi": False,
+ },
+ "eggs": {
+ "name": "eggs",
+ "version": "3.2.1",
+ "check_multi": False,
+ }
+ }, # none found
),
(
[Package('spam', '3.2.1')],
- '3.2',
- ['eggs'], # completely missing
+ {
+ "eggs": {
+ "name": "eggs",
+ "version": "3.2.1",
+ "check_multi": False,
+ }
+ }, # completely missing
),
(
[Package('spam', '3.2.1'), Package('eggs', '3.3.2')],
- '3.2',
- ['eggs'], # not the right version
+ {
+ "eggs": {
+ "name": "eggs",
+ "version": "3.2.1",
+ "check_multi": False,
+ }
+ }, # not the right version
),
(
[Package('spam', '3.2.1'), Package('eggs', '3.2.1')],
- '3.2',
- [], # all found
+ {}, # all found
),
(
[Package('spam', '3.2.1'), Package('eggs', '3.2.1.5')],
- '3.2.1',
- [], # found with more specific version
+ {}, # found with more specific version
),
(
[Package('eggs', '1.2.3'), Package('eggs', '3.2.1.5')],
- '3.2.1',
- ['spam'], # eggs found with multiple versions
+ {
+ "spam": {
+ "name": "spam",
+ "version": "3.2.1",
+ "check_multi": False,
+ }
+ }, # eggs found with multiple versions
),
])
-def test_check_pkgs_for_precise_version(pkgs, requested_release, expect_not_found):
+def test_check_pkgs_for_precise_version(pkgs, expect_not_found):
if expect_not_found:
with pytest.raises(aos_version.PreciseVersionNotFound) as e:
- aos_version._check_precise_version_found(pkgs, expected_pkgs, requested_release)
- assert set(expect_not_found) == set(e.value.problem_pkgs)
+ aos_version._check_precise_version_found(pkgs, expected_pkgs)
+
+ assert list(expect_not_found.values()) == e.value.problem_pkgs
else:
- aos_version._check_precise_version_found(pkgs, expected_pkgs, requested_release)
+ aos_version._check_precise_version_found(pkgs, expected_pkgs)
-@pytest.mark.parametrize('pkgs, requested_release, expect_higher', [
+@pytest.mark.parametrize('pkgs, expect_higher', [
(
[],
- '3.2.1',
[],
),
(
- [Package('spam', '3.2.1')],
- '3.2',
+ [Package('spam', '3.2.1.9')],
[], # more precise but not strictly higher
),
(
[Package('spam', '3.3')],
- '3.2.1',
['spam-3.3'], # lower precision, but higher
),
(
[Package('spam', '3.2.1'), Package('eggs', '3.3.2')],
- '3.2',
['eggs-3.3.2'], # one too high
),
(
[Package('eggs', '1.2.3'), Package('eggs', '3.2.1.5'), Package('eggs', '3.4')],
- '3.2.1',
['eggs-3.4'], # multiple versions, one is higher
),
(
[Package('eggs', '3.2.1'), Package('eggs', '3.4'), Package('eggs', '3.3')],
- '3.2.1',
['eggs-3.4'], # multiple versions, two are higher
),
])
-def test_check_pkgs_for_greater_version(pkgs, requested_release, expect_higher):
+def test_check_pkgs_for_greater_version(pkgs, expect_higher):
if expect_higher:
with pytest.raises(aos_version.FoundHigherVersion) as e:
- aos_version._check_higher_version_found(pkgs, expected_pkgs, requested_release)
+ aos_version._check_higher_version_found(pkgs, expected_pkgs)
assert set(expect_higher) == set(e.value.problem_pkgs)
else:
- aos_version._check_higher_version_found(pkgs, expected_pkgs, requested_release)
+ aos_version._check_higher_version_found(pkgs, expected_pkgs)
@pytest.mark.parametrize('pkgs, expect_to_flag_pkgs', [
diff --git a/roles/openshift_health_checker/test/curator_test.py b/roles/openshift_health_checker/test/curator_test.py
new file mode 100644
index 000000000..ae108c96e
--- /dev/null
+++ b/roles/openshift_health_checker/test/curator_test.py
@@ -0,0 +1,68 @@
+import pytest
+
+from openshift_checks.logging.curator import Curator
+
+
+def canned_curator(exec_oc=None):
+ """Create a Curator check object with canned exec_oc method"""
+ check = Curator("dummy") # fails if a module is actually invoked
+ if exec_oc:
+ check._exec_oc = exec_oc
+ return check
+
+
+def assert_error(error, expect_error):
+ if expect_error:
+ assert error
+ assert expect_error in error
+ else:
+ assert not error
+
+
+plain_curator_pod = {
+ "metadata": {
+ "labels": {"component": "curator", "deploymentconfig": "logging-curator"},
+ "name": "logging-curator-1",
+ },
+ "status": {
+ "containerStatuses": [{"ready": True}],
+ "conditions": [{"status": "True", "type": "Ready"}],
+ "podIP": "10.10.10.10",
+ }
+}
+
+not_running_curator_pod = {
+ "metadata": {
+ "labels": {"component": "curator", "deploymentconfig": "logging-curator"},
+ "name": "logging-curator-2",
+ },
+ "status": {
+ "containerStatuses": [{"ready": False}],
+ "conditions": [{"status": "False", "type": "Ready"}],
+ "podIP": "10.10.10.10",
+ }
+}
+
+
+@pytest.mark.parametrize('pods, expect_error', [
+ (
+ [],
+ "no Curator pods",
+ ),
+ (
+ [plain_curator_pod],
+ None,
+ ),
+ (
+ [not_running_curator_pod],
+ "not currently in a running state",
+ ),
+ (
+ [plain_curator_pod, plain_curator_pod],
+ "more than one Curator pod",
+ ),
+])
+def test_get_curator_pods(pods, expect_error):
+ check = canned_curator()
+ error = check.check_curator(pods)
+ assert_error(error, expect_error)
diff --git a/roles/openshift_health_checker/test/disk_availability_test.py b/roles/openshift_health_checker/test/disk_availability_test.py
index 970b474d7..b353fa610 100644
--- a/roles/openshift_health_checker/test/disk_availability_test.py
+++ b/roles/openshift_health_checker/test/disk_availability_test.py
@@ -42,9 +42,10 @@ def test_cannot_determine_available_disk(ansible_mounts, extra_words):
assert word in str(excinfo.value)
-@pytest.mark.parametrize('group_names,ansible_mounts', [
+@pytest.mark.parametrize('group_names,configured_min,ansible_mounts', [
(
['masters'],
+ 0,
[{
'mount': '/',
'size_available': 40 * 10**9 + 1,
@@ -52,6 +53,7 @@ def test_cannot_determine_available_disk(ansible_mounts, extra_words):
),
(
['nodes'],
+ 0,
[{
'mount': '/',
'size_available': 15 * 10**9 + 1,
@@ -59,6 +61,7 @@ def test_cannot_determine_available_disk(ansible_mounts, extra_words):
),
(
['etcd'],
+ 0,
[{
'mount': '/',
'size_available': 20 * 10**9 + 1,
@@ -66,6 +69,15 @@ def test_cannot_determine_available_disk(ansible_mounts, extra_words):
),
(
['etcd'],
+ 1, # configure lower threshold
+ [{
+ 'mount': '/',
+ 'size_available': 1 * 10**9 + 1, # way smaller than recommended
+ }],
+ ),
+ (
+ ['etcd'],
+ 0,
[{
# not enough space on / ...
'mount': '/',
@@ -77,9 +89,10 @@ def test_cannot_determine_available_disk(ansible_mounts, extra_words):
}],
),
])
-def test_succeeds_with_recommended_disk_space(group_names, ansible_mounts):
+def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansible_mounts):
task_vars = dict(
group_names=group_names,
+ openshift_check_min_host_disk_gb=configured_min,
ansible_mounts=ansible_mounts,
)
@@ -89,9 +102,10 @@ def test_succeeds_with_recommended_disk_space(group_names, ansible_mounts):
assert not result.get('failed', False)
-@pytest.mark.parametrize('group_names,ansible_mounts,extra_words', [
+@pytest.mark.parametrize('group_names,configured_min,ansible_mounts,extra_words', [
(
['masters'],
+ 0,
[{
'mount': '/',
'size_available': 1,
@@ -99,7 +113,17 @@ def test_succeeds_with_recommended_disk_space(group_names, ansible_mounts):
['0.0 GB'],
),
(
+ ['masters'],
+ 100, # set a higher threshold
+ [{
+ 'mount': '/',
+ 'size_available': 50 * 10**9, # would normally be enough...
+ }],
+ ['100.0 GB'],
+ ),
+ (
['nodes'],
+ 0,
[{
'mount': '/',
'size_available': 1 * 10**9,
@@ -108,6 +132,7 @@ def test_succeeds_with_recommended_disk_space(group_names, ansible_mounts):
),
(
['etcd'],
+ 0,
[{
'mount': '/',
'size_available': 1,
@@ -116,6 +141,7 @@ def test_succeeds_with_recommended_disk_space(group_names, ansible_mounts):
),
(
['nodes', 'masters'],
+ 0,
[{
'mount': '/',
# enough space for a node, not enough for a master
@@ -125,6 +151,7 @@ def test_succeeds_with_recommended_disk_space(group_names, ansible_mounts):
),
(
['etcd'],
+ 0,
[{
# enough space on / ...
'mount': '/',
@@ -137,9 +164,10 @@ def test_succeeds_with_recommended_disk_space(group_names, ansible_mounts):
['0.0 GB'],
),
])
-def test_fails_with_insufficient_disk_space(group_names, ansible_mounts, extra_words):
+def test_fails_with_insufficient_disk_space(group_names, configured_min, ansible_mounts, extra_words):
task_vars = dict(
group_names=group_names,
+ openshift_check_min_host_disk_gb=configured_min,
ansible_mounts=ansible_mounts,
)
diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py
index 2a9c32f77..197c65f51 100644
--- a/roles/openshift_health_checker/test/docker_image_availability_test.py
+++ b/roles/openshift_health_checker/test/docker_image_availability_test.py
@@ -3,26 +3,182 @@ import pytest
from openshift_checks.docker_image_availability import DockerImageAvailability
-@pytest.mark.xfail(strict=True) # TODO: remove this once this test is fully implemented.
-@pytest.mark.parametrize('task_vars,expected_result', [
- (
- dict(
- openshift=dict(common=dict(
+@pytest.mark.parametrize('deployment_type, is_containerized, group_names, expect_active', [
+ ("origin", True, [], True),
+ ("openshift-enterprise", True, [], True),
+ ("enterprise", True, [], False),
+ ("online", True, [], False),
+ ("invalid", True, [], False),
+ ("", True, [], False),
+ ("origin", False, [], False),
+ ("openshift-enterprise", False, [], False),
+ ("origin", False, ["nodes", "masters"], True),
+ ("openshift-enterprise", False, ["etcd"], False),
+])
+def test_is_active(deployment_type, is_containerized, group_names, expect_active):
+ task_vars = dict(
+ openshift=dict(common=dict(is_containerized=is_containerized)),
+ openshift_deployment_type=deployment_type,
+ group_names=group_names,
+ )
+ assert DockerImageAvailability.is_active(task_vars=task_vars) == expect_active
+
+
+@pytest.mark.parametrize("is_containerized,is_atomic", [
+ (True, True),
+ (False, False),
+ (True, False),
+ (False, True),
+])
+def test_all_images_available_locally(is_containerized, is_atomic):
+ def execute_module(module_name, args, task_vars):
+ if module_name == "yum":
+ return {"changed": True}
+
+ assert module_name == "docker_image_facts"
+ assert 'name' in args
+ assert args['name']
+ return {
+ 'images': [args['name']],
+ }
+
+ result = DockerImageAvailability(execute_module=execute_module).run(tmp=None, task_vars=dict(
+ openshift=dict(
+ common=dict(
+ service_type='origin',
+ is_containerized=is_containerized,
+ is_atomic=is_atomic,
+ ),
+ docker=dict(additional_registries=["docker.io"]),
+ ),
+ openshift_deployment_type='origin',
+ openshift_release='v3.4',
+ openshift_image_tag='3.4',
+ ))
+
+ assert not result.get('failed', False)
+
+
+@pytest.mark.parametrize("available_locally", [
+ False,
+ True,
+])
+def test_all_images_available_remotely(available_locally):
+ def execute_module(module_name, args, task_vars):
+ if module_name == 'docker_image_facts':
+ return {'images': [], 'failed': available_locally}
+ return {'changed': False}
+
+ result = DockerImageAvailability(execute_module=execute_module).run(tmp=None, task_vars=dict(
+ openshift=dict(
+ common=dict(
service_type='origin',
is_containerized=False,
- )),
- openshift_release='v3.5',
- deployment_type='origin',
- openshift_image_tag='', # FIXME: should not be required
+ is_atomic=False,
+ ),
+ docker=dict(additional_registries=["docker.io", "registry.access.redhat.com"]),
),
- {'changed': False},
+ openshift_deployment_type='origin',
+ openshift_release='3.4',
+ openshift_image_tag='v3.4',
+ ))
+
+ assert not result.get('failed', False)
+
+
+def test_all_images_unavailable():
+ def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ if module_name == "command":
+ return {
+ 'failed': True,
+ }
+
+ return {
+ 'changed': False,
+ }
+
+ check = DockerImageAvailability(execute_module=execute_module)
+ actual = check.run(tmp=None, task_vars=dict(
+ openshift=dict(
+ common=dict(
+ service_type='origin',
+ is_containerized=False,
+ is_atomic=False,
+ ),
+ docker=dict(additional_registries=["docker.io"]),
+ ),
+ openshift_deployment_type="openshift-enterprise",
+ openshift_release=None,
+ openshift_image_tag='latest'
+ ))
+
+ assert actual['failed']
+ assert "required Docker images are not available" in actual['msg']
+
+
+@pytest.mark.parametrize("message,extra_words", [
+ (
+ "docker image update failure",
+ ["docker image update failure"],
+ ),
+ (
+ "No package matching 'skopeo' found available, installed or updated",
+ ["dependencies can be installed via `yum`"]
),
- # TODO: add more parameters here to test the multiple possible inputs that affect behavior.
])
-def test_docker_image_availability(task_vars, expected_result):
+def test_skopeo_update_failure(message, extra_words):
def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
- return {'info': {}} # TODO: this will vary depending on input parameters.
+ if module_name == "yum":
+ return {
+ "failed": True,
+ "msg": message,
+ "changed": False,
+ }
- check = DockerImageAvailability(execute_module=execute_module)
- result = check.run(tmp=None, task_vars=task_vars)
- assert result == expected_result
+ return {'changed': False}
+
+ actual = DockerImageAvailability(execute_module=execute_module).run(tmp=None, task_vars=dict(
+ openshift=dict(
+ common=dict(
+ service_type='origin',
+ is_containerized=False,
+ is_atomic=False,
+ ),
+ docker=dict(additional_registries=["unknown.io"]),
+ ),
+ openshift_deployment_type="openshift-enterprise",
+ openshift_release='',
+ openshift_image_tag='',
+ ))
+
+ assert actual["failed"]
+ for word in extra_words:
+ assert word in actual["msg"]
+
+
+@pytest.mark.parametrize("deployment_type,registries", [
+ ("origin", ["unknown.io"]),
+ ("openshift-enterprise", ["registry.access.redhat.com"]),
+ ("openshift-enterprise", []),
+])
+def test_registry_availability(deployment_type, registries):
+ def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ return {
+ 'changed': False,
+ }
+
+ actual = DockerImageAvailability(execute_module=execute_module).run(tmp=None, task_vars=dict(
+ openshift=dict(
+ common=dict(
+ service_type='origin',
+ is_containerized=False,
+ is_atomic=False,
+ ),
+ docker=dict(additional_registries=registries),
+ ),
+ openshift_deployment_type=deployment_type,
+ openshift_release='',
+ openshift_image_tag='',
+ ))
+
+ assert not actual.get("failed", False)
diff --git a/roles/openshift_health_checker/test/docker_storage_test.py b/roles/openshift_health_checker/test/docker_storage_test.py
new file mode 100644
index 000000000..292a323db
--- /dev/null
+++ b/roles/openshift_health_checker/test/docker_storage_test.py
@@ -0,0 +1,224 @@
+import pytest
+
+from openshift_checks import OpenShiftCheckException
+from openshift_checks.docker_storage import DockerStorage
+
+
+def dummy_check(execute_module=None):
+ def dummy_exec(self, status, task_vars):
+ raise Exception("dummy executor called")
+ return DockerStorage(execute_module=execute_module or dummy_exec)
+
+
+@pytest.mark.parametrize('is_containerized, group_names, is_active', [
+ (False, ["masters", "etcd"], False),
+ (False, ["masters", "nodes"], True),
+ (True, ["etcd"], True),
+])
+def test_is_active(is_containerized, group_names, is_active):
+ task_vars = dict(
+ openshift=dict(common=dict(is_containerized=is_containerized)),
+ group_names=group_names,
+ )
+ assert DockerStorage.is_active(task_vars=task_vars) == is_active
+
+
+non_atomic_task_vars = {"openshift": {"common": {"is_atomic": False}}}
+
+
+@pytest.mark.parametrize('docker_info, failed, expect_msg', [
+ (
+ dict(failed=True, msg="Error connecting: Error while fetching server API version"),
+ True,
+ ["Is docker running on this host?"],
+ ),
+ (
+ dict(msg="I have no info"),
+ True,
+ ["missing info"],
+ ),
+ (
+ dict(info={
+ "Driver": "devicemapper",
+ "DriverStatus": [("Pool Name", "docker-docker--pool")],
+ }),
+ False,
+ [],
+ ),
+ (
+ dict(info={
+ "Driver": "devicemapper",
+ "DriverStatus": [("Data loop file", "true")],
+ }),
+ True,
+ ["loopback devices with the Docker devicemapper storage driver"],
+ ),
+ (
+ dict(info={
+ "Driver": "overlay2",
+ "DriverStatus": []
+ }),
+ False,
+ [],
+ ),
+ (
+ dict(info={
+ "Driver": "overlay",
+ }),
+ True,
+ ["unsupported Docker storage driver"],
+ ),
+ (
+ dict(info={
+ "Driver": "unsupported",
+ }),
+ True,
+ ["unsupported Docker storage driver"],
+ ),
+])
+def test_check_storage_driver(docker_info, failed, expect_msg):
+ def execute_module(module_name, args, tmp=None, task_vars=None):
+ if module_name == "yum":
+ return {}
+ if module_name != "docker_info":
+ raise ValueError("not expecting module " + module_name)
+ return docker_info
+
+ check = dummy_check(execute_module=execute_module)
+ check._check_dm_usage = lambda status, task_vars: dict() # stub out for this test
+ result = check.run(tmp=None, task_vars=non_atomic_task_vars)
+
+ if failed:
+ assert result["failed"]
+ else:
+ assert not result.get("failed", False)
+
+ for word in expect_msg:
+ assert word in result["msg"]
+
+
+enough_space = {
+ "Pool Name": "docker--vg-docker--pool",
+ "Data Space Used": "19.92 MB",
+ "Data Space Total": "8.535 GB",
+ "Metadata Space Used": "40.96 kB",
+ "Metadata Space Total": "25.17 MB",
+}
+
+not_enough_space = {
+ "Pool Name": "docker--vg-docker--pool",
+ "Data Space Used": "10 GB",
+ "Data Space Total": "10 GB",
+ "Metadata Space Used": "42 kB",
+ "Metadata Space Total": "43 kB",
+}
+
+
+@pytest.mark.parametrize('task_vars, driver_status, vg_free, success, expect_msg', [
+ (
+ {"max_thinpool_data_usage_percent": "not a float"},
+ enough_space,
+ "12g",
+ False,
+ ["is not a percentage"],
+ ),
+ (
+ {},
+ {}, # empty values from driver status
+ "bogus", # also does not parse as bytes
+ False,
+ ["Could not interpret", "as bytes"],
+ ),
+ (
+ {},
+ enough_space,
+ "12.00g",
+ True,
+ [],
+ ),
+ (
+ {},
+ not_enough_space,
+ "0.00",
+ False,
+ ["data usage", "metadata usage", "higher than threshold"],
+ ),
+])
+def test_dm_usage(task_vars, driver_status, vg_free, success, expect_msg):
+ check = dummy_check()
+ check._get_vg_free = lambda pool, task_vars: vg_free
+ result = check._check_dm_usage(driver_status, task_vars)
+ result_success = not result.get("failed")
+
+ assert result_success is success
+ for msg in expect_msg:
+ assert msg in result["msg"]
+
+
+@pytest.mark.parametrize('pool, command_returns, raises, returns', [
+ (
+ "foo-bar",
+ { # vgs missing
+ "msg": "[Errno 2] No such file or directory",
+ "failed": True,
+ "cmd": "/sbin/vgs",
+ "rc": 2,
+ },
+ "Failed to run /sbin/vgs",
+ None,
+ ),
+ (
+ "foo", # no hyphen in name - should not happen
+ {},
+ "name does not have the expected format",
+ None,
+ ),
+ (
+ "foo-bar",
+ dict(stdout=" 4.00g\n"),
+ None,
+ "4.00g",
+ ),
+ (
+ "foo-bar",
+ dict(stdout="\n"), # no matching VG
+ "vgs did not find this VG",
+ None,
+ )
+])
+def test_vg_free(pool, command_returns, raises, returns):
+ def execute_module(module_name, args, tmp=None, task_vars=None):
+ if module_name != "command":
+ raise ValueError("not expecting module " + module_name)
+ return command_returns
+
+ check = dummy_check(execute_module=execute_module)
+ if raises:
+ with pytest.raises(OpenShiftCheckException) as err:
+ check._get_vg_free(pool, {})
+ assert raises in str(err.value)
+ else:
+ ret = check._get_vg_free(pool, {})
+ assert ret == returns
+
+
+@pytest.mark.parametrize('string, expect_bytes', [
+ ("12", 12.0),
+ ("12 k", 12.0 * 1024),
+ ("42.42 MB", 42.42 * 1024**2),
+ ("12g", 12.0 * 1024**3),
+])
+def test_convert_to_bytes(string, expect_bytes):
+ got = DockerStorage._convert_to_bytes(string)
+ assert got == expect_bytes
+
+
+@pytest.mark.parametrize('string', [
+ "bork",
+ "42 Qs",
+])
+def test_convert_to_bytes_error(string):
+ with pytest.raises(ValueError) as err:
+ DockerStorage._convert_to_bytes(string)
+ assert "Cannot convert" in str(err.value)
+ assert string in str(err.value)
diff --git a/roles/openshift_health_checker/test/elasticsearch_test.py b/roles/openshift_health_checker/test/elasticsearch_test.py
new file mode 100644
index 000000000..b9d375d8c
--- /dev/null
+++ b/roles/openshift_health_checker/test/elasticsearch_test.py
@@ -0,0 +1,180 @@
+import pytest
+import json
+
+from openshift_checks.logging.elasticsearch import Elasticsearch
+
+task_vars_config_base = dict(openshift=dict(common=dict(config_base='/etc/origin')))
+
+
+def canned_elasticsearch(exec_oc=None):
+ """Create an Elasticsearch check object with canned exec_oc method"""
+ check = Elasticsearch("dummy") # fails if a module is actually invoked
+ if exec_oc:
+ check._exec_oc = exec_oc
+ return check
+
+
+def assert_error(error, expect_error):
+ if expect_error:
+ assert error
+ assert expect_error in error
+ else:
+ assert not error
+
+
+plain_es_pod = {
+ "metadata": {
+ "labels": {"component": "es", "deploymentconfig": "logging-es"},
+ "name": "logging-es",
+ },
+ "status": {
+ "conditions": [{"status": "True", "type": "Ready"}],
+ "containerStatuses": [{"ready": True}],
+ "podIP": "10.10.10.10",
+ },
+ "_test_master_name_str": "name logging-es",
+}
+
+split_es_pod = {
+ "metadata": {
+ "labels": {"component": "es", "deploymentconfig": "logging-es-2"},
+ "name": "logging-es-2",
+ },
+ "status": {
+ "conditions": [{"status": "True", "type": "Ready"}],
+ "containerStatuses": [{"ready": True}],
+ "podIP": "10.10.10.10",
+ },
+ "_test_master_name_str": "name logging-es-2",
+}
+
+
+def test_check_elasticsearch():
+ assert 'No logging Elasticsearch pods' in canned_elasticsearch().check_elasticsearch([], {})
+
+ # canned oc responses to match so all the checks pass
+ def _exec_oc(cmd, args, task_vars):
+ if '_cat/master' in cmd:
+ return 'name logging-es'
+ elif '/_nodes' in cmd:
+ return json.dumps(es_node_list)
+ elif '_cluster/health' in cmd:
+ return '{"status": "green"}'
+ elif ' df ' in cmd:
+ return 'IUse% Use%\n 3% 4%\n'
+ else:
+ raise Exception(cmd)
+
+ assert not canned_elasticsearch(_exec_oc).check_elasticsearch([plain_es_pod], {})
+
+
+def pods_by_name(pods):
+ return {pod['metadata']['name']: pod for pod in pods}
+
+
+@pytest.mark.parametrize('pods, expect_error', [
+ (
+ [],
+ 'No logging Elasticsearch masters',
+ ),
+ (
+ [plain_es_pod],
+ None,
+ ),
+ (
+ [plain_es_pod, split_es_pod],
+ 'Found multiple Elasticsearch masters',
+ ),
+])
+def test_check_elasticsearch_masters(pods, expect_error):
+ test_pods = list(pods)
+ check = canned_elasticsearch(lambda cmd, args, task_vars: test_pods.pop(0)['_test_master_name_str'])
+
+ errors = check._check_elasticsearch_masters(pods_by_name(pods), task_vars_config_base)
+ assert_error(''.join(errors), expect_error)
+
+
+es_node_list = {
+ 'nodes': {
+ 'random-es-name': {
+ 'host': 'logging-es',
+ }}}
+
+
+@pytest.mark.parametrize('pods, node_list, expect_error', [
+ (
+ [],
+ {},
+ 'No logging Elasticsearch masters',
+ ),
+ (
+ [plain_es_pod],
+ es_node_list,
+ None,
+ ),
+ (
+ [plain_es_pod],
+ {}, # empty list of nodes triggers KeyError
+ "Failed to query",
+ ),
+ (
+ [split_es_pod],
+ es_node_list,
+ 'does not correspond to any known ES pod',
+ ),
+])
+def test_check_elasticsearch_node_list(pods, node_list, expect_error):
+ check = canned_elasticsearch(lambda cmd, args, task_vars: json.dumps(node_list))
+
+ errors = check._check_elasticsearch_node_list(pods_by_name(pods), task_vars_config_base)
+ assert_error(''.join(errors), expect_error)
+
+
+@pytest.mark.parametrize('pods, health_data, expect_error', [
+ (
+ [plain_es_pod],
+ [{"status": "green"}],
+ None,
+ ),
+ (
+ [plain_es_pod],
+ [{"no-status": "should bomb"}],
+ 'Could not retrieve cluster health status',
+ ),
+ (
+ [plain_es_pod, split_es_pod],
+ [{"status": "green"}, {"status": "red"}],
+ 'Elasticsearch cluster health status is RED',
+ ),
+])
+def test_check_elasticsearch_cluster_health(pods, health_data, expect_error):
+ test_health_data = list(health_data)
+ check = canned_elasticsearch(lambda cmd, args, task_vars: json.dumps(test_health_data.pop(0)))
+
+ errors = check._check_es_cluster_health(pods_by_name(pods), task_vars_config_base)
+ assert_error(''.join(errors), expect_error)
+
+
+@pytest.mark.parametrize('disk_data, expect_error', [
+ (
+ 'df: /elasticsearch/persistent: No such file or directory\n',
+ 'Could not retrieve storage usage',
+ ),
+ (
+ 'IUse% Use%\n 3% 4%\n',
+ None,
+ ),
+ (
+ 'IUse% Use%\n 95% 40%\n',
+ 'Inode percent usage on the storage volume',
+ ),
+ (
+ 'IUse% Use%\n 3% 94%\n',
+ 'Disk percent usage on the storage volume',
+ ),
+])
+def test_check_elasticsearch_diskspace(disk_data, expect_error):
+ check = canned_elasticsearch(lambda cmd, args, task_vars: disk_data)
+
+ errors = check._check_elasticsearch_diskspace(pods_by_name([plain_es_pod]), task_vars_config_base)
+ assert_error(''.join(errors), expect_error)
diff --git a/roles/openshift_health_checker/test/fluentd_test.py b/roles/openshift_health_checker/test/fluentd_test.py
new file mode 100644
index 000000000..d151c0b19
--- /dev/null
+++ b/roles/openshift_health_checker/test/fluentd_test.py
@@ -0,0 +1,109 @@
+import pytest
+import json
+
+from openshift_checks.logging.fluentd import Fluentd
+
+
+def canned_fluentd(exec_oc=None):
+ """Create a Fluentd check object with canned exec_oc method"""
+ check = Fluentd("dummy") # fails if a module is actually invoked
+ if exec_oc:
+ check._exec_oc = exec_oc
+ return check
+
+
+def assert_error(error, expect_error):
+ if expect_error:
+ assert error
+ assert expect_error in error
+ else:
+ assert not error
+
+
+fluentd_pod_node1 = {
+ "metadata": {
+ "labels": {"component": "fluentd", "deploymentconfig": "logging-fluentd"},
+ "name": "logging-fluentd-1",
+ },
+ "spec": {"host": "node1", "nodeName": "node1"},
+ "status": {
+ "containerStatuses": [{"ready": True}],
+ "conditions": [{"status": "True", "type": "Ready"}],
+ }
+}
+fluentd_pod_node2_down = {
+ "metadata": {
+ "labels": {"component": "fluentd", "deploymentconfig": "logging-fluentd"},
+ "name": "logging-fluentd-2",
+ },
+ "spec": {"host": "node2", "nodeName": "node2"},
+ "status": {
+ "containerStatuses": [{"ready": False}],
+ "conditions": [{"status": "False", "type": "Ready"}],
+ }
+}
+fluentd_node1 = {
+ "metadata": {
+ "labels": {"logging-infra-fluentd": "true", "kubernetes.io/hostname": "node1"},
+ "name": "node1",
+ },
+ "status": {"addresses": [{"type": "InternalIP", "address": "10.10.1.1"}]},
+}
+fluentd_node2 = {
+ "metadata": {
+ "labels": {"logging-infra-fluentd": "true", "kubernetes.io/hostname": "hostname"},
+ "name": "node2",
+ },
+ "status": {"addresses": [{"type": "InternalIP", "address": "10.10.1.2"}]},
+}
+fluentd_node3_unlabeled = {
+ "metadata": {
+ "labels": {"kubernetes.io/hostname": "hostname"},
+ "name": "node3",
+ },
+ "status": {"addresses": [{"type": "InternalIP", "address": "10.10.1.3"}]},
+}
+
+
+@pytest.mark.parametrize('pods, nodes, expect_error', [
+ (
+ [],
+ [],
+ 'No nodes appear to be defined',
+ ),
+ (
+ [],
+ [fluentd_node3_unlabeled],
+ 'There are no nodes with the fluentd label',
+ ),
+ (
+ [],
+ [fluentd_node1, fluentd_node3_unlabeled],
+ 'Fluentd will not aggregate logs from these nodes.',
+ ),
+ (
+ [],
+ [fluentd_node2],
+ "nodes are supposed to have a Fluentd pod but do not",
+ ),
+ (
+ [fluentd_pod_node1, fluentd_pod_node1],
+ [fluentd_node1],
+ 'more Fluentd pods running than nodes labeled',
+ ),
+ (
+ [fluentd_pod_node2_down],
+ [fluentd_node2],
+ "Fluentd pods are supposed to be running",
+ ),
+ (
+ [fluentd_pod_node1],
+ [fluentd_node1],
+ None,
+ ),
+])
+def test_get_fluentd_pods(pods, nodes, expect_error):
+ check = canned_fluentd(lambda cmd, args, task_vars: json.dumps(dict(items=nodes)))
+
+ error = check.check_fluentd(pods, {})
+ assert_error(error, expect_error)
diff --git a/roles/openshift_health_checker/test/kibana_test.py b/roles/openshift_health_checker/test/kibana_test.py
new file mode 100644
index 000000000..19140a1b6
--- /dev/null
+++ b/roles/openshift_health_checker/test/kibana_test.py
@@ -0,0 +1,218 @@
+import pytest
+import json
+
+try:
+ import urllib2
+ from urllib2 import HTTPError, URLError
+except ImportError:
+ from urllib.error import HTTPError, URLError
+ import urllib.request as urllib2
+
+from openshift_checks.logging.kibana import Kibana
+
+
+def canned_kibana(exec_oc=None):
+ """Create a Kibana check object with canned exec_oc method"""
+ check = Kibana("dummy") # fails if a module is actually invoked
+ if exec_oc:
+ check._exec_oc = exec_oc
+ return check
+
+
+def assert_error(error, expect_error):
+ if expect_error:
+ assert error
+ assert expect_error in error
+ else:
+ assert not error
+
+
+plain_kibana_pod = {
+ "metadata": {
+ "labels": {"component": "kibana", "deploymentconfig": "logging-kibana"},
+ "name": "logging-kibana-1",
+ },
+ "status": {
+ "containerStatuses": [{"ready": True}, {"ready": True}],
+ "conditions": [{"status": "True", "type": "Ready"}],
+ }
+}
+not_running_kibana_pod = {
+ "metadata": {
+ "labels": {"component": "kibana", "deploymentconfig": "logging-kibana"},
+ "name": "logging-kibana-2",
+ },
+ "status": {
+ "containerStatuses": [{"ready": True}, {"ready": False}],
+ "conditions": [{"status": "True", "type": "Ready"}],
+ }
+}
+
+
+@pytest.mark.parametrize('pods, expect_error', [
+ (
+ [],
+ "There are no Kibana pods deployed",
+ ),
+ (
+ [plain_kibana_pod],
+ None,
+ ),
+ (
+ [not_running_kibana_pod],
+ "No Kibana pod is in a running state",
+ ),
+ (
+ [plain_kibana_pod, not_running_kibana_pod],
+ "The following Kibana pods are not currently in a running state",
+ ),
+])
+def test_check_kibana(pods, expect_error):
+ check = canned_kibana()
+ error = check.check_kibana(pods)
+ assert_error(error, expect_error)
+
+
+@pytest.mark.parametrize('route, expect_url, expect_error', [
+ (
+ None,
+ None,
+ 'no_route_exists',
+ ),
+
+ # test route with no ingress
+ (
+ {
+ "metadata": {
+ "labels": {"component": "kibana", "deploymentconfig": "logging-kibana"},
+ "name": "logging-kibana",
+ },
+ "status": {
+ "ingress": [],
+ },
+ "spec": {
+ "host": "hostname",
+ }
+ },
+ None,
+ 'route_not_accepted',
+ ),
+
+ # test route with no host
+ (
+ {
+ "metadata": {
+ "labels": {"component": "kibana", "deploymentconfig": "logging-kibana"},
+ "name": "logging-kibana",
+ },
+ "status": {
+ "ingress": [{
+ "status": True,
+ }],
+ },
+ "spec": {},
+ },
+ None,
+ 'route_missing_host',
+ ),
+
+ # test route that looks fine
+ (
+ {
+ "metadata": {
+ "labels": {"component": "kibana", "deploymentconfig": "logging-kibana"},
+ "name": "logging-kibana",
+ },
+ "status": {
+ "ingress": [{
+ "status": True,
+ }],
+ },
+ "spec": {
+ "host": "hostname",
+ },
+ },
+ "https://hostname/",
+ None,
+ ),
+])
+def test_get_kibana_url(route, expect_url, expect_error):
+ check = canned_kibana(lambda cmd, args, task_vars: json.dumps(route) if route else "")
+
+ url, error = check._get_kibana_url({})
+ if expect_url:
+ assert url == expect_url
+ else:
+ assert not url
+ if expect_error:
+ assert error == expect_error
+ else:
+ assert not error
+
+
+@pytest.mark.parametrize('exec_result, expect', [
+ (
+ 'urlopen error [Errno 111] Connection refused',
+ 'at least one router routing to it?',
+ ),
+ (
+ 'urlopen error [Errno -2] Name or service not known',
+ 'DNS configured for the Kibana hostname?',
+ ),
+ (
+ 'Status code was not [302]: HTTP Error 500: Server error',
+ 'did not return the correct status code',
+ ),
+ (
+ 'bork bork bork',
+ 'bork bork bork', # should pass through
+ ),
+])
+def test_verify_url_internal_failure(exec_result, expect):
+ check = Kibana(execute_module=lambda module_name, args, task_vars: dict(failed=True, msg=exec_result))
+ check._get_kibana_url = lambda task_vars: ('url', None)
+
+ error = check._check_kibana_route({})
+ assert_error(error, expect)
+
+
+@pytest.mark.parametrize('lib_result, expect', [
+ (
+ HTTPError('url', 500, "it broke", hdrs=None, fp=None),
+ 'it broke',
+ ),
+ (
+ URLError('it broke'),
+ 'it broke',
+ ),
+ (
+ 302,
+ 'returned the wrong error code',
+ ),
+ (
+ 200,
+ None,
+ ),
+])
+def test_verify_url_external_failure(lib_result, expect, monkeypatch):
+
+ class _http_return:
+
+ def __init__(self, code):
+ self.code = code
+
+ def getcode(self):
+ return self.code
+
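+    # stand-in for urllib2.urlopen: return a canned response object for int
+    # results, raise the given exception otherwise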
+ def urlopen(url, context):
+ if type(lib_result) is int:
+ return _http_return(lib_result)
+ raise lib_result
+ monkeypatch.setattr(urllib2, 'urlopen', urlopen)
+
+ check = canned_kibana()
+ check._get_kibana_url = lambda task_vars: ('url', None)
+ check._verify_url_internal = lambda url, task_vars: None
+
+ error = check._check_kibana_route({})
+ assert_error(error, expect)
diff --git a/roles/openshift_health_checker/test/logging_check_test.py b/roles/openshift_health_checker/test/logging_check_test.py
new file mode 100644
index 000000000..b6db34fe3
--- /dev/null
+++ b/roles/openshift_health_checker/test/logging_check_test.py
@@ -0,0 +1,137 @@
+import pytest
+import json
+
+from openshift_checks.logging.logging import LoggingCheck, OpenShiftCheckException
+
+task_vars_config_base = dict(openshift=dict(common=dict(config_base='/etc/origin')))
+
+
+logging_namespace = "logging"
+
+
+def canned_loggingcheck(exec_oc=None):
+ """Create a LoggingCheck object with canned exec_oc method"""
+ check = LoggingCheck("dummy") # fails if a module is actually invoked
+ check.logging_namespace = 'logging'
+ if exec_oc:
+ check.exec_oc = exec_oc
+ return check
+
+
+def assert_error(error, expect_error):
+ if expect_error:
+ assert error
+ assert expect_error in error
+ else:
+ assert not error
+
+
+plain_es_pod = {
+ "metadata": {
+ "labels": {"component": "es", "deploymentconfig": "logging-es"},
+ "name": "logging-es",
+ },
+ "status": {
+ "conditions": [{"status": "True", "type": "Ready"}],
+ "containerStatuses": [{"ready": True}],
+ "podIP": "10.10.10.10",
+ },
+ "_test_master_name_str": "name logging-es",
+}
+
+plain_kibana_pod = {
+ "metadata": {
+ "labels": {"component": "kibana", "deploymentconfig": "logging-kibana"},
+ "name": "logging-kibana-1",
+ },
+ "status": {
+ "containerStatuses": [{"ready": True}, {"ready": True}],
+ "conditions": [{"status": "True", "type": "Ready"}],
+ }
+}
+
+fluentd_pod_node1 = {
+ "metadata": {
+ "labels": {"component": "fluentd", "deploymentconfig": "logging-fluentd"},
+ "name": "logging-fluentd-1",
+ },
+ "spec": {"host": "node1", "nodeName": "node1"},
+ "status": {
+ "containerStatuses": [{"ready": True}],
+ "conditions": [{"status": "True", "type": "Ready"}],
+ }
+}
+
+plain_curator_pod = {
+ "metadata": {
+ "labels": {"component": "curator", "deploymentconfig": "logging-curator"},
+ "name": "logging-curator-1",
+ },
+ "status": {
+ "containerStatuses": [{"ready": True}],
+ "conditions": [{"status": "True", "type": "Ready"}],
+ "podIP": "10.10.10.10",
+ }
+}
+
+
+@pytest.mark.parametrize('problem, expect', [
+ ("[Errno 2] No such file or directory", "supposed to be a master"),
+ ("Permission denied", "Unexpected error using `oc`"),
+])
+def test_oc_failure(problem, expect):
+ def execute_module(module_name, args, task_vars):
+ if module_name == "ocutil":
+ return dict(failed=True, result=problem)
+ return dict(changed=False)
+
+ check = LoggingCheck({})
+
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ check.exec_oc(execute_module, logging_namespace, 'get foo', [], task_vars=task_vars_config_base)
+ assert expect in str(excinfo)
+
+
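+# the check should be active only on the first master listed, and only when
+# logging is deployed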
+groups_with_first_master = dict(masters=['this-host', 'other-host'])
+groups_with_second_master = dict(masters=['other-host', 'this-host'])
+groups_not_a_master = dict(masters=['other-host'])
+
+
+@pytest.mark.parametrize('groups, logging_deployed, is_active', [
+ (groups_with_first_master, True, True),
+ (groups_with_first_master, False, False),
+ (groups_not_a_master, True, False),
+ (groups_with_second_master, True, False),
+])
+def test_is_active(groups, logging_deployed, is_active):
+ task_vars = dict(
+ ansible_ssh_host='this-host',
+ groups=groups,
+ openshift_hosted_logging_deploy=logging_deployed,
+ )
+
+ assert LoggingCheck.is_active(task_vars=task_vars) == is_active
+
+
+@pytest.mark.parametrize('pod_output, expect_pods, expect_error', [
+ (
+ 'No resources found.',
+ None,
+ 'There are no pods in the logging namespace',
+ ),
+ (
+ json.dumps({'items': [plain_kibana_pod, plain_es_pod, plain_curator_pod, fluentd_pod_node1]}),
+ [plain_es_pod],
+ None,
+ ),
+])
+def test_get_pods_for_component(pod_output, expect_pods, expect_error):
+ check = canned_loggingcheck(lambda exec_module, namespace, cmd, args, task_vars: pod_output)
+ pods, error = check.get_pods_for_component(
+ lambda name, args, task_vars: {},
+ logging_namespace,
+ "es",
+ {}
+ )
+ assert_error(error, expect_error)
diff --git a/roles/openshift_health_checker/test/memory_availability_test.py b/roles/openshift_health_checker/test/memory_availability_test.py
index e161a5b9e..4fbaea0a9 100644
--- a/roles/openshift_health_checker/test/memory_availability_test.py
+++ b/roles/openshift_health_checker/test/memory_availability_test.py
@@ -20,27 +20,42 @@ def test_is_active(group_names, is_active):
assert MemoryAvailability.is_active(task_vars=task_vars) == is_active
-@pytest.mark.parametrize('group_names,ansible_memtotal_mb', [
+@pytest.mark.parametrize('group_names,configured_min,ansible_memtotal_mb', [
(
['masters'],
+ 0,
17200,
),
(
['nodes'],
+ 0,
8200,
),
(
+ ['nodes'],
+ 1, # configure lower threshold
+ 2000, # too low for recommended but not for configured
+ ),
+ (
+ ['nodes'],
+ 2, # configure threshold where adjustment pushes it over
+ 1900,
+ ),
+ (
['etcd'],
- 22200,
+ 0,
+ 8200,
),
(
['masters', 'nodes'],
+ 0,
17000,
),
])
-def test_succeeds_with_recommended_memory(group_names, ansible_memtotal_mb):
+def test_succeeds_with_recommended_memory(group_names, configured_min, ansible_memtotal_mb):
task_vars = dict(
group_names=group_names,
+ openshift_check_min_host_memory_gb=configured_min,
ansible_memtotal_mb=ansible_memtotal_mb,
)
@@ -50,39 +65,62 @@ def test_succeeds_with_recommended_memory(group_names, ansible_memtotal_mb):
assert not result.get('failed', False)
-@pytest.mark.parametrize('group_names,ansible_memtotal_mb,extra_words', [
+@pytest.mark.parametrize('group_names,configured_min,ansible_memtotal_mb,extra_words', [
(
['masters'],
0,
- ['0.0 GB'],
+ 0,
+ ['0.0 GiB'],
),
(
['nodes'],
+ 0,
100,
- ['0.1 GB'],
+ ['0.1 GiB'],
+ ),
+ (
+ ['nodes'],
+ 24, # configure higher threshold
+ 20 * 1024, # enough to meet recommended but not configured
+ ['20.0 GiB'],
+ ),
+ (
+ ['nodes'],
+ 24, # configure higher threshold
+ 22 * 1024, # not enough for adjustment to push over threshold
+ ['22.0 GiB'],
),
(
['etcd'],
- -1,
- ['0.0 GB'],
+ 0,
+ 6 * 1024,
+ ['6.0 GiB'],
+ ),
+ (
+ ['etcd', 'masters'],
+ 0,
+ 9 * 1024, # enough memory for etcd, not enough for a master
+ ['9.0 GiB'],
),
(
['nodes', 'masters'],
+ 0,
# enough memory for a node, not enough for a master
- 11000,
- ['11.0 GB'],
+ 11 * 1024,
+ ['11.0 GiB'],
),
])
-def test_fails_with_insufficient_memory(group_names, ansible_memtotal_mb, extra_words):
+def test_fails_with_insufficient_memory(group_names, configured_min, ansible_memtotal_mb, extra_words):
task_vars = dict(
group_names=group_names,
+ openshift_check_min_host_memory_gb=configured_min,
ansible_memtotal_mb=ansible_memtotal_mb,
)
check = MemoryAvailability(execute_module=fake_execute_module)
result = check.run(tmp=None, task_vars=task_vars)
- assert result['failed']
+ assert result.get('failed', False)
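+    # the failure message should include "below recommended" and the host's memory in GiB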
for word in 'below recommended'.split() + extra_words:
assert word in result['msg']
diff --git a/roles/openshift_health_checker/test/ovs_version_test.py b/roles/openshift_health_checker/test/ovs_version_test.py
new file mode 100644
index 000000000..6494e1c06
--- /dev/null
+++ b/roles/openshift_health_checker/test/ovs_version_test.py
@@ -0,0 +1,89 @@
+import pytest
+
+from openshift_checks.ovs_version import OvsVersion, OpenShiftCheckException
+
+
+def test_openshift_version_not_supported():
+ def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ return {}
+
+ openshift_release = '111.7.0'
+
+ task_vars = dict(
+ openshift=dict(common=dict(service_type='origin')),
+ openshift_release=openshift_release,
+ openshift_image_tag='v' + openshift_release,
+ openshift_deployment_type='origin',
+ )
+
+ check = OvsVersion(execute_module=execute_module)
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ check.run(tmp=None, task_vars=task_vars)
+
+ assert "no recommended version of Open vSwitch" in str(excinfo.value)
+
+
+def test_invalid_openshift_release_format():
+ def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ return {}
+
+ task_vars = dict(
+ openshift=dict(common=dict(service_type='origin')),
+ openshift_image_tag='v0',
+ openshift_deployment_type='origin',
+ )
+
+ check = OvsVersion(execute_module=execute_module)
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ check.run(tmp=None, task_vars=task_vars)
+ assert "invalid version" in str(excinfo.value)
+
+
+@pytest.mark.parametrize('openshift_release,expected_ovs_version', [
+ ("3.5", "2.6"),
+ ("3.6", "2.6"),
+ ("3.4", "2.4"),
+ ("3.3", "2.4"),
+ ("1.0", "2.4"),
+])
+def test_ovs_package_version(openshift_release, expected_ovs_version):
+ task_vars = dict(
+ openshift=dict(common=dict(service_type='origin')),
+ openshift_release=openshift_release,
+ openshift_image_tag='v' + openshift_release,
+ )
+ return_value = object()
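+    # sentinel: check.run() is expected to return the module result unchanged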
+
+ def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ assert module_name == 'rpm_version'
+ assert "package_list" in module_args
+
+ for pkg in module_args["package_list"]:
+ if pkg["name"] == "openvswitch":
+ assert pkg["version"] == expected_ovs_version
+
+ return return_value
+
+ check = OvsVersion(execute_module=execute_module)
+ result = check.run(tmp=None, task_vars=task_vars)
+ assert result is return_value
+
+
+@pytest.mark.parametrize('group_names,is_containerized,is_active', [
+ (['masters'], False, True),
+ # ensure check is skipped on containerized installs
+ (['masters'], True, False),
+ (['nodes'], False, True),
+ (['masters', 'nodes'], False, True),
+ (['masters', 'etcd'], False, True),
+ ([], False, False),
+ (['etcd'], False, False),
+ (['lb'], False, False),
+ (['nfs'], False, False),
+])
+def test_ovs_version_skip_when_not_master_nor_node(group_names, is_containerized, is_active):
+ task_vars = dict(
+ group_names=group_names,
+ openshift=dict(common=dict(is_containerized=is_containerized)),
+ )
+ assert OvsVersion.is_active(task_vars=task_vars) == is_active
diff --git a/roles/openshift_health_checker/test/package_version_test.py b/roles/openshift_health_checker/test/package_version_test.py
index 196d9816a..91eace512 100644
--- a/roles/openshift_health_checker/test/package_version_test.py
+++ b/roles/openshift_health_checker/test/package_version_test.py
@@ -1,24 +1,132 @@
import pytest
-from openshift_checks.package_version import PackageVersion
+from openshift_checks.package_version import PackageVersion, OpenShiftCheckException
-def test_package_version():
+@pytest.mark.parametrize('openshift_release, extra_words', [
+ ('111.7.0', ["no recommended version of Open vSwitch"]),
+ ('0.0.0', ["no recommended version of Docker"]),
+])
+def test_openshift_version_not_supported(openshift_release, extra_words):
+ def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ return {}
+
+ task_vars = dict(
+ openshift=dict(common=dict(service_type='origin')),
+ openshift_release=openshift_release,
+ openshift_image_tag='v' + openshift_release,
+ openshift_deployment_type='origin',
+ )
+
+ check = PackageVersion(execute_module=execute_module)
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ check.run(tmp=None, task_vars=task_vars)
+
+ for word in extra_words:
+ assert word in str(excinfo.value)
+
+
+def test_invalid_openshift_release_format():
+ def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ return {}
+
+ task_vars = dict(
+ openshift=dict(common=dict(service_type='origin')),
+ openshift_image_tag='v0',
+ openshift_deployment_type='origin',
+ )
+
+ check = PackageVersion(execute_module=execute_module)
+ with pytest.raises(OpenShiftCheckException) as excinfo:
+ check.run(tmp=None, task_vars=task_vars)
+ assert "invalid version" in str(excinfo.value)
+
+
+@pytest.mark.parametrize('openshift_release', [
+ "3.5",
+ "3.6",
+ "3.4",
+ "3.3",
+])
+def test_package_version(openshift_release):
task_vars = dict(
openshift=dict(common=dict(service_type='origin')),
- openshift_release='3.5',
+ openshift_release=openshift_release,
+ openshift_image_tag='v' + openshift_release,
openshift_deployment_type='origin',
)
return_value = object()
def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
assert module_name == 'aos_version'
- assert 'requested_openshift_release' in module_args
- assert 'openshift_deployment_type' in module_args
- assert 'rpm_prefix' in module_args
- assert module_args['requested_openshift_release'] == task_vars['openshift_release']
- assert module_args['openshift_deployment_type'] == task_vars['openshift_deployment_type']
- assert module_args['rpm_prefix'] == task_vars['openshift']['common']['service_type']
+ assert "package_list" in module_args
+
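+        # master and node packages should be pinned to the requested OpenShift release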
+ for pkg in module_args["package_list"]:
+ if "-master" in pkg["name"] or "-node" in pkg["name"]:
+ assert pkg["version"] == task_vars["openshift_release"]
+
+ return return_value
+
+ check = PackageVersion(execute_module=execute_module)
+ result = check.run(tmp=None, task_vars=task_vars)
+ assert result is return_value
+
+
+@pytest.mark.parametrize('deployment_type,openshift_release,expected_ovs_version', [
+ ("openshift-enterprise", "3.5", "2.6"),
+ ("origin", "3.6", "2.6"),
+ ("openshift-enterprise", "3.4", "2.4"),
+ ("origin", "3.3", "2.4"),
+])
+def test_ovs_package_version(deployment_type, openshift_release, expected_ovs_version):
+ task_vars = dict(
+ openshift=dict(common=dict(service_type='origin')),
+ openshift_release=openshift_release,
+ openshift_image_tag='v' + openshift_release,
+ openshift_deployment_type=deployment_type,
+ )
+ return_value = object()
+
+ def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ assert module_name == 'aos_version'
+ assert "package_list" in module_args
+
+ for pkg in module_args["package_list"]:
+ if pkg["name"] == "openvswitch":
+ assert pkg["version"] == expected_ovs_version
+
+ return return_value
+
+ check = PackageVersion(execute_module=execute_module)
+ result = check.run(tmp=None, task_vars=task_vars)
+ assert result is return_value
+
+
+@pytest.mark.parametrize('deployment_type,openshift_release,expected_docker_version', [
+ ("origin", "3.5", "1.12"),
+ ("openshift-enterprise", "3.4", "1.12"),
+ ("origin", "3.3", "1.10"),
+ ("openshift-enterprise", "3.2", "1.10"),
+ ("origin", "3.1", "1.8"),
+ ("openshift-enterprise", "3.1", "1.8"),
+])
+def test_docker_package_version(deployment_type, openshift_release, expected_docker_version):
+ task_vars = dict(
+ openshift=dict(common=dict(service_type='origin')),
+ openshift_release=openshift_release,
+ openshift_image_tag='v' + openshift_release,
+ openshift_deployment_type=deployment_type,
+ )
+ return_value = object()
+
+ def execute_module(module_name=None, module_args=None, tmp=None, task_vars=None):
+ assert module_name == 'aos_version'
+ assert "package_list" in module_args
+
+ for pkg in module_args["package_list"]:
+ if pkg["name"] == "docker":
+ assert pkg["version"] == expected_docker_version
+
return return_value
check = PackageVersion(execute_module=execute_module)
diff --git a/roles/openshift_health_checker/test/rpm_version_test.py b/roles/openshift_health_checker/test/rpm_version_test.py
new file mode 100644
index 000000000..2f09ef965
--- /dev/null
+++ b/roles/openshift_health_checker/test/rpm_version_test.py
@@ -0,0 +1,82 @@
+import pytest
+import rpm_version
+
+expected_pkgs = {
+ "spam": {
+ "name": "spam",
+ "version": "3.2.1",
+ },
+ "eggs": {
+ "name": "eggs",
+ "version": "3.2.1",
+ },
+}
+
+
+@pytest.mark.parametrize('pkgs, expect_not_found', [
+ (
+ {},
+ ["spam", "eggs"], # none found
+ ),
+ (
+ {"spam": ["3.2.1", "4.5.1"]},
+ ["eggs"], # completely missing
+ ),
+ (
+ {
+ "spam": ["3.2.1", "4.5.1"],
+ "eggs": ["3.2.1"],
+ },
+ [], # all found
+ ),
+])
+def test_check_pkg_found(pkgs, expect_not_found):
+ if expect_not_found:
+ with pytest.raises(rpm_version.RpmVersionException) as e:
+ rpm_version._check_pkg_versions(pkgs, expected_pkgs)
+
+ assert "not found to be installed" in str(e.value)
+ assert set(expect_not_found) == set(e.value.problem_pkgs)
+ else:
+ rpm_version._check_pkg_versions(pkgs, expected_pkgs)
+
+
+@pytest.mark.parametrize('pkgs, expect_not_found', [
+ (
+ {
+ 'spam': ['3.2.1'],
+ 'eggs': ['3.3.2'],
+ },
+ {
+ "eggs": {
+ "required_version": "3.2",
+ "found_versions": ["3.3"],
+ }
+ }, # not the right version
+ ),
+ (
+ {
+ 'spam': ['3.1.2', "3.3.2"],
+ 'eggs': ['3.3.2', "1.2.3"],
+ },
+ {
+ "eggs": {
+ "required_version": "3.2",
+ "found_versions": ["3.3", "1.2"],
+ },
+ "spam": {
+ "required_version": "3.2",
+ "found_versions": ["3.1", "3.3"],
+ }
+ }, # not the right version
+ ),
+])
+def test_check_pkg_version_found(pkgs, expect_not_found):
+ if expect_not_found:
+ with pytest.raises(rpm_version.RpmVersionException) as e:
+ rpm_version._check_pkg_versions(pkgs, expected_pkgs)
+
+ assert "found to be installed with an incorrect version" in str(e.value)
+ assert expect_not_found == e.value.problem_pkgs
+ else:
+ rpm_version._check_pkg_versions(pkgs, expected_pkgs)
diff --git a/roles/openshift_hosted/tasks/registry/storage/object_storage.yml b/roles/openshift_hosted/tasks/registry/storage/object_storage.yml
index 3dde83bee..8aaba0f3c 100644
--- a/roles/openshift_hosted/tasks/registry/storage/object_storage.yml
+++ b/roles/openshift_hosted/tasks/registry/storage/object_storage.yml
@@ -1,20 +1,4 @@
---
-- name: Assert supported openshift.hosted.registry.storage.provider
- assert:
- that:
- - openshift.hosted.registry.storage.provider in ['azure_blob', 's3', 'swift']
- msg: >
- Object Storage Provider: "{{ openshift.hosted.registry.storage.provider }}"
- is not currently supported
-
-- name: Assert implemented openshift.hosted.registry.storage.provider
- assert:
- that:
- - openshift.hosted.registry.storage.provider not in ['azure_blob', 'swift']
- msg: >
- Support for provider: "{{ openshift.hosted.registry.storage.provider }}"
- not implemented yet
-
- include: s3.yml
when: openshift.hosted.registry.storage.provider == 's3'
diff --git a/roles/openshift_hosted/tasks/registry/storage/s3.yml b/roles/openshift_hosted/tasks/registry/storage/s3.yml
index 26f921f15..318969885 100644
--- a/roles/openshift_hosted/tasks/registry/storage/s3.yml
+++ b/roles/openshift_hosted/tasks/registry/storage/s3.yml
@@ -2,14 +2,10 @@
- name: Assert that S3 variables are provided for registry_config template
assert:
that:
- - openshift.hosted.registry.storage.s3.accesskey | default(none) is not none
- - openshift.hosted.registry.storage.s3.secretkey | default(none) is not none
- openshift.hosted.registry.storage.s3.bucket | default(none) is not none
- openshift.hosted.registry.storage.s3.region | default(none) is not none
msg: |
When using S3 storage, the following variables are required:
- openshift_hosted_registry_storage_s3_accesskey
- openshift_hosted_registry_storage_s3_secretkey
openshift_hosted_registry_storage_s3_bucket
openshift_hosted_registry_storage_s3_region
diff --git a/roles/openshift_hosted/tasks/router/router.yml b/roles/openshift_hosted/tasks/router/router.yml
index e75e3b16f..192afc87a 100644
--- a/roles/openshift_hosted/tasks/router/router.yml
+++ b/roles/openshift_hosted/tasks/router/router.yml
@@ -37,7 +37,7 @@
cafile: "{{ openshift_master_config_dir ~ '/ca.crt' }}"
# End Block
- when: openshift_hosted_router_create_certificate
+ when: openshift_hosted_router_create_certificate | bool
- name: Get the certificate contents for router
copy:
@@ -46,7 +46,7 @@
src: "{{ item }}"
with_items: "{{ openshift_hosted_routers | oo_collect(attribute='certificate') |
oo_select_keys_from_list(['keyfile', 'certfile', 'cafile']) }}"
- when: not openshift_hosted_router_create_certificate
+ when: not openshift_hosted_router_create_certificate | bool
- name: Create the router service account(s)
oc_serviceaccount:
diff --git a/roles/openshift_hosted/templates/registry_config.j2 b/roles/openshift_hosted/templates/registry_config.j2
index ca6a23f21..dc8a9f089 100644
--- a/roles/openshift_hosted/templates/registry_config.j2
+++ b/roles/openshift_hosted/templates/registry_config.j2
@@ -10,8 +10,12 @@ storage:
blobdescriptor: inmemory
{% if openshift_hosted_registry_storage_provider | default('') == 's3' %}
s3:
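+{# accesskey/secretkey are optional; when omitted, the registry must obtain
+   S3 credentials some other way (for example, an IAM instance role) #}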
+{% if openshift_hosted_registry_storage_s3_accesskey is defined %}
accesskey: {{ openshift_hosted_registry_storage_s3_accesskey }}
+{% endif %}
+{% if openshift_hosted_registry_storage_s3_secretkey is defined %}
secretkey: {{ openshift_hosted_registry_storage_s3_secretkey }}
+{% endif %}
region: {{ openshift_hosted_registry_storage_s3_region }}
{% if openshift_hosted_registry_storage_s3_regionendpoint is defined %}
regionendpoint: {{ openshift_hosted_registry_storage_s3_regionendpoint }}
diff --git a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
index 8fe02444e..8bf98ba41 100644
--- a/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
+++ b/roles/openshift_hosted_templates/files/v3.6/enterprise/registry-console.yaml
@@ -103,9 +103,9 @@ parameters:
- description: 'Specify "registry/repository" prefix for container image; e.g. for "registry.access.redhat.com/openshift3/registry-console:latest", set prefix "registry.access.redhat.com/openshift3/"'
name: IMAGE_PREFIX
value: "registry.access.redhat.com/openshift3/"
- - description: 'Specify image version; e.g. for "registry.access.redhat.com/openshift3/registry-console:3.6", set version "3.6"'
+ - description: 'Specify image version; e.g. for "registry.access.redhat.com/openshift3/registry-console:v3.6", set version "v3.6"'
name: IMAGE_VERSION
- value: "3.6"
+ value: "v3.6"
- description: "The public URL for the Openshift OAuth Provider, e.g. https://openshift.example.com:8443"
name: OPENSHIFT_OAUTH_PROVIDER_URL
required: true
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
index 3c410eff2..0c60ef6fd 100644
--- a/roles/openshift_logging/README.md
+++ b/roles/openshift_logging/README.md
@@ -124,3 +124,34 @@ Elasticsearch OPS too, if using an OPS cluster:
- `openshift_logging_es_ops_ca_ext`: The location of the CA cert for the cert
Elasticsearch uses for the external TLS server cert (default is the internal
CA)
+
+### mux - secure_forward listener service
+- `openshift_logging_use_mux`: Default `False`. If this is `True`, a service
+ called `mux` will be deployed. This service will act as a Fluentd
+ secure_forward forwarder for the node agent Fluentd daemonsets running in the
+ cluster. This can be used to reduce the number of connections to the
+  OpenShift API server by configuring each node Fluentd to send raw logs to
+  `mux` and turning off the k8s metadata plugin.
+- `openshift_logging_mux_allow_external`: Default `False`. If this is `True`,
+ the `mux` service will be deployed, and it will be configured to allow
+ Fluentd clients running outside of the cluster to send logs using
+ secure_forward. This allows OpenShift logging to be used as a central
+ logging service for clients other than OpenShift, or other OpenShift
+ clusters.
+- `openshift_logging_use_mux_client`: Default `False`. If this is `True`, the
+ node agent Fluentd services will be configured to send logs to the mux
+ service rather than directly to Elasticsearch.
+- `openshift_logging_mux_hostname`: Default is "mux." +
+  `openshift_master_default_subdomain`. This is the hostname *external*
+ clients will use to connect to mux, and will be used in the TLS server cert
+ subject.
+- `openshift_logging_mux_port`: Default `24284`.
+- `openshift_logging_mux_cpu_limit`: Default `500m`.
+- `openshift_logging_mux_memory_limit`: Default `1Gi`.
+- `openshift_logging_mux_default_namespaces`: Default `["mux-undefined"]`. The
+  first value in the list is the namespace to use for undefined projects,
+  followed by any additional namespaces to create by default. Users will
+  typically not need to set this.
+- `openshift_logging_mux_namespaces`: Default `[]`. Additional namespaces to
+  create for _external_ mux clients to associate with their logs. Users will
+  need to set this (see the example below).
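+
+For example, a minimal inventory sketch that deploys mux and points the node
+Fluentd pods at it (the hostname value here is illustrative):
+
+```
+openshift_logging_use_mux=True
+openshift_logging_use_mux_client=True
+openshift_logging_mux_hostname=mux.apps.example.com
+```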
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index 837c54067..573cbdd09 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -160,8 +160,13 @@ openshift_logging_use_mux: "{{ openshift_logging_mux_allow_external | default(Fa
openshift_logging_use_mux_client: False
openshift_logging_mux_hostname: "{{ 'mux.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
openshift_logging_mux_port: 24284
-openshift_logging_mux_cpu_limit: 100m
-openshift_logging_mux_memory_limit: 512Mi
+openshift_logging_mux_cpu_limit: 500m
+openshift_logging_mux_memory_limit: 1Gi
+# the namespace to use for undefined projects should come first, followed by any
+# additional namespaces to create by default - users will typically not need to set this
+openshift_logging_mux_default_namespaces: ["mux-undefined"]
+# extra namespaces to create for mux clients - users will need to set this
+openshift_logging_mux_namespaces: []
# following can be uncommented to provide values for configmaps -- take care when providing file contents as it may cause your cluster to not operate correctly
#es_logging_contents:
diff --git a/roles/openshift_logging/files/logging-deployer-sa.yaml b/roles/openshift_logging/files/logging-deployer-sa.yaml
deleted file mode 100644
index 334c9402b..000000000
--- a/roles/openshift_logging/files/logging-deployer-sa.yaml
+++ /dev/null
@@ -1,6 +0,0 @@
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: logging-deployer
-secrets:
-- name: logging-deployer
diff --git a/roles/openshift_logging/filter_plugins/openshift_logging.py b/roles/openshift_logging/filter_plugins/openshift_logging.py
index 44b0b2d48..eac086e81 100644
--- a/roles/openshift_logging/filter_plugins/openshift_logging.py
+++ b/roles/openshift_logging/filter_plugins/openshift_logging.py
@@ -42,7 +42,7 @@ def map_from_pairs(source, delim="="):
if source == '':
return dict()
- return dict(source.split(delim) for item in source.split(","))
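+    # each comma-separated "key<delim>value" item becomes one dict entry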
+ return dict(item.split(delim) for item in source.split(","))
# pylint: disable=too-few-public-methods
diff --git a/roles/openshift_logging/library/openshift_logging_facts.py b/roles/openshift_logging/library/openshift_logging_facts.py
index a55e72725..35accfb78 100644
--- a/roles/openshift_logging/library/openshift_logging_facts.py
+++ b/roles/openshift_logging/library/openshift_logging_facts.py
@@ -318,7 +318,7 @@ def main():
''' The main method '''
module = AnsibleModule( # noqa: F405
argument_spec=dict(
- admin_kubeconfig={"required": True, "type": "str"},
+ admin_kubeconfig={"default": "/etc/origin/master/admin.kubeconfig", "type": "str"},
oc_bin={"required": True, "type": "str"},
openshift_logging_namespace={"required": True, "type": "str"}
),
diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml
index 2f5b68b4d..6d023a02d 100644
--- a/roles/openshift_logging/tasks/delete_logging.yaml
+++ b/roles/openshift_logging/tasks/delete_logging.yaml
@@ -1,43 +1,41 @@
---
-- name: stop logging
- include: stop_cluster.yaml
-
# delete the deployment objects that we had created
- name: delete logging api objects
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- delete {{ item }} --selector logging-infra -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ oc_obj:
+ state: absent
+ kind: "{{ item }}"
+ namespace: "{{ openshift_logging_namespace }}"
+ selector: "logging-infra"
with_items:
- dc
- rc
- svc
- routes
- templates
- - daemonset
- register: delete_result
- changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
-
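+  # "ds" is the short resource name for daemonset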
+ - ds
# delete the oauthclient
- name: delete oauthclient kibana-proxy
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig delete oauthclient kibana-proxy --ignore-not-found=true
- register: delete_result
- changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+ oc_obj:
+ state: absent
+ kind: oauthclient
+ namespace: "{{ openshift_logging_namespace }}"
+ name: kibana-proxy
# delete any image streams that we may have created
- name: delete logging is
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- delete is -l logging-infra=support -n {{ openshift_logging_namespace }} --ignore-not-found=true
- register: delete_result
- changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
+ oc_obj:
+ state: absent
+ kind: is
+ namespace: "{{ openshift_logging_namespace }}"
+ selector: "logging-infra=support"
# delete our old secrets
- name: delete logging secrets
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- delete secret {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ oc_secret:
+ state: absent
+ namespace: "{{ openshift_logging_namespace }}"
+ name: "{{ item }}"
with_items:
- logging-fluentd
- logging-elasticsearch
@@ -45,71 +43,55 @@
- logging-kibana-proxy
- logging-curator
- logging-mux
- ignore_errors: yes
- register: delete_result
- changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
-
-# delete role bindings
-- name: delete rolebindings
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- delete rolebinding {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
- with_items:
- - logging-elasticsearch-view-role
- register: delete_result
- changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
-
-# delete cluster role bindings
-- name: delete cluster role bindings
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- delete clusterrolebindings {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
- with_items:
- - rolebinding-reader
- register: delete_result
- changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
-
-# delete cluster roles
-- name: delete cluster roles
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- delete clusterroles {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
- with_items:
- - rolebinding-reader
- register: delete_result
- changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
# delete our service accounts
- name: delete service accounts
oc_serviceaccount:
- name: "{{ item }}"
- namespace: "{{ openshift_logging_namespace }}"
state: absent
+ namespace: "{{ openshift_logging_namespace }}"
+ name: "{{ item }}"
with_items:
- aggregated-logging-elasticsearch
- aggregated-logging-kibana
- aggregated-logging-curator
- aggregated-logging-fluentd
-# delete our roles
-- name: delete roles
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- delete clusterrole {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+# delete role bindings
+- name: delete rolebindings
+ oc_obj:
+ state: absent
+ kind: rolebinding
+ namespace: "{{ openshift_logging_namespace }}"
+ name: logging-elasticsearch-view-role
+
+# delete cluster role bindings
+- name: delete cluster role bindings
+ oc_obj:
+ state: absent
+ kind: clusterrolebindings
+ namespace: "{{ openshift_logging_namespace }}"
+ name: rolebinding-reader
+
+# delete cluster roles
+- name: delete cluster roles
+ oc_obj:
+ state: absent
+ kind: clusterrole
+ namespace: "{{ openshift_logging_namespace }}"
+ name: "{{ item }}"
with_items:
+ - rolebinding-reader
- daemonset-admin
- register: delete_result
- changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
# delete our configmaps
- name: delete configmaps
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- delete configmap {{ item }} -n {{ openshift_logging_namespace }} --ignore-not-found=true
+ oc_obj:
+ state: absent
+ kind: configmap
+ namespace: "{{ openshift_logging_namespace }}"
+ name: "{{ item }}"
with_items:
- logging-curator
- logging-elasticsearch
- logging-fluentd
- logging-mux
- register: delete_result
- changed_when: delete_result.stdout.find("deleted") != -1 and delete_result.rc == 0
diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml
index 46a7e82c6..9c8f0986a 100644
--- a/roles/openshift_logging/tasks/generate_certs.yaml
+++ b/roles/openshift_logging/tasks/generate_certs.yaml
@@ -17,7 +17,7 @@
- name: Generate certificates
command: >
- {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig ca create-signer-cert
+ {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-signer-cert
--key={{generated_certs_dir}}/ca.key --cert={{generated_certs_dir}}/ca.crt
--serial={{generated_certs_dir}}/ca.serial.txt --name=logging-signer-test
check_mode: no
@@ -51,14 +51,14 @@
with_items:
- procure_component: mux
hostnames: "logging-mux, {{openshift_logging_mux_hostname}}"
- when: openshift_logging_use_mux
+ when: openshift_logging_use_mux | bool
- include: procure_shared_key.yaml
loop_control:
loop_var: shared_key_info
with_items:
- procure_component: mux
- when: openshift_logging_use_mux
+ when: openshift_logging_use_mux | bool
- include: procure_server_certs.yaml
loop_control:
@@ -124,7 +124,7 @@
- system.logging.mux
loop_control:
loop_var: node_name
- when: openshift_logging_use_mux
+ when: openshift_logging_use_mux | bool
- name: Generate PEM cert for Elasticsearch external route
include: generate_pems.yaml component={{node_name}}
diff --git a/roles/openshift_logging/tasks/generate_clusterrolebindings.yaml b/roles/openshift_logging/tasks/generate_clusterrolebindings.yaml
deleted file mode 100644
index 56f590717..000000000
--- a/roles/openshift_logging/tasks/generate_clusterrolebindings.yaml
+++ /dev/null
@@ -1,13 +0,0 @@
----
-- name: Generate ClusterRoleBindings
- template: src=clusterrolebinding.j2 dest={{mktemp.stdout}}/templates/logging-15-{{obj_name}}-clusterrolebinding.yaml
- vars:
- acct_name: aggregated-logging-elasticsearch
- obj_name: rolebinding-reader
- crb_usernames: ["system:serviceaccount:{{openshift_logging_namespace}}:{{acct_name}}"]
- subjects:
- - kind: ServiceAccount
- name: "{{acct_name}}"
- namespace: "{{openshift_logging_namespace}}"
- check_mode: no
- changed_when: no
diff --git a/roles/openshift_logging/tasks/generate_clusterroles.yaml b/roles/openshift_logging/tasks/generate_clusterroles.yaml
deleted file mode 100644
index 0b8b1014c..000000000
--- a/roles/openshift_logging/tasks/generate_clusterroles.yaml
+++ /dev/null
@@ -1,11 +0,0 @@
----
-- name: Generate ClusterRole for cluster-reader
- template: src=clusterrole.j2 dest={{mktemp.stdout}}/templates/logging-10-{{obj_name}}-clusterrole.yaml
- vars:
- obj_name: rolebinding-reader
- rules:
- - resources: [clusterrolebindings]
- verbs:
- - get
- check_mode: no
- changed_when: no
diff --git a/roles/openshift_logging/tasks/generate_configmaps.yaml b/roles/openshift_logging/tasks/generate_configmaps.yaml
deleted file mode 100644
index b047eb35a..000000000
--- a/roles/openshift_logging/tasks/generate_configmaps.yaml
+++ /dev/null
@@ -1,178 +0,0 @@
----
-- block:
- - fail:
- msg: "The openshift_logging_es_log_appenders '{{openshift_logging_es_log_appenders}}' has an unrecognized option and only supports the following as a list: {{es_log_appenders | join(', ')}}"
- when:
- - es_logging_contents is undefined
- - "{{ openshift_logging_es_log_appenders | list | difference(es_log_appenders) | length != 0 }}"
- changed_when: no
-
- - template:
- src: elasticsearch-logging.yml.j2
- dest: "{{mktemp.stdout}}/elasticsearch-logging.yml"
- vars:
- root_logger: "{{openshift_logging_es_log_appenders | join(', ')}}"
- when: es_logging_contents is undefined
- changed_when: no
- check_mode: no
-
- - local_action: >
- template src=elasticsearch.yml.j2
- dest="{{local_tmp.stdout}}/elasticsearch-gen-template.yml"
- vars:
- - allow_cluster_reader: "{{openshift_logging_es_ops_allow_cluster_reader | lower | default('false')}}"
- - es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(1) }}"
- - es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas | default(0) }}"
- when: es_config_contents is undefined
- changed_when: no
-
- - copy:
- content: "{{ config_source | combine(override_config,recursive=True) | to_nice_yaml }}"
- dest: "{{mktemp.stdout}}/elasticsearch.yml"
- vars:
- config_source: "{{lookup('file','{{local_tmp.stdout}}/elasticsearch-gen-template.yml') | from_yaml }}"
- override_config: "{{openshift_logging_es_config | from_yaml}}"
- when: es_logging_contents is undefined
- changed_when: no
-
- - copy:
- content: "{{es_logging_contents}}"
- dest: "{{mktemp.stdout}}/elasticsearch-logging.yml"
- when: es_logging_contents is defined
- changed_when: no
-
- - copy:
- content: "{{es_config_contents}}"
- dest: "{{mktemp.stdout}}/elasticsearch.yml"
- when: es_config_contents is defined
- changed_when: no
-
- - command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-elasticsearch
- --from-file=logging.yml={{mktemp.stdout}}/elasticsearch-logging.yml --from-file=elasticsearch.yml={{mktemp.stdout}}/elasticsearch.yml -o yaml --dry-run
- register: es_configmap
- changed_when: no
-
- - copy:
- content: "{{es_configmap.stdout}}"
- dest: "{{mktemp.stdout}}/templates/logging-elasticsearch-configmap.yaml"
- when: es_configmap.stdout is defined
- changed_when: no
- check_mode: no
-
-- block:
- - copy:
- src: curator.yml
- dest: "{{mktemp.stdout}}/curator.yml"
- when: curator_config_contents is undefined
- changed_when: no
-
- - copy:
- content: "{{curator_config_contents}}"
- dest: "{{mktemp.stdout}}/curator.yml"
- when: curator_config_contents is defined
- changed_when: no
-
- - command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-curator
- --from-file=config.yaml={{mktemp.stdout}}/curator.yml -o yaml --dry-run
- register: curator_configmap
- changed_when: no
-
- - copy:
- content: "{{curator_configmap.stdout}}"
- dest: "{{mktemp.stdout}}/templates/logging-curator-configmap.yaml"
- when: curator_configmap.stdout is defined
- changed_when: no
- check_mode: no
-
-- block:
- - copy:
- src: fluent.conf
- dest: "{{mktemp.stdout}}/fluent.conf"
- when: fluentd_config_contents is undefined
- changed_when: no
-
- - copy:
- src: fluentd-throttle-config.yaml
- dest: "{{mktemp.stdout}}/fluentd-throttle-config.yaml"
- when: fluentd_throttle_contents is undefined
- changed_when: no
-
- - copy:
- src: secure-forward.conf
- dest: "{{mktemp.stdout}}/secure-forward.conf"
- when: fluentd_securefoward_contents is undefined
- changed_when: no
-
- - copy:
- content: "{{fluentd_config_contents}}"
- dest: "{{mktemp.stdout}}/fluent.conf"
- when: fluentd_config_contents is defined
- changed_when: no
-
- - copy:
- content: "{{fluentd_throttle_contents}}"
- dest: "{{mktemp.stdout}}/fluentd-throttle-config.yaml"
- when: fluentd_throttle_contents is defined
- changed_when: no
-
- - copy:
- content: "{{fluentd_secureforward_contents}}"
- dest: "{{mktemp.stdout}}/secure-forward.conf"
- when: fluentd_secureforward_contents is defined
- changed_when: no
-
- - command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-fluentd
- --from-file=fluent.conf={{mktemp.stdout}}/fluent.conf --from-file=throttle-config.yaml={{mktemp.stdout}}/fluentd-throttle-config.yaml
- --from-file=secure-forward.conf={{mktemp.stdout}}/secure-forward.conf -o yaml --dry-run
- register: fluentd_configmap
- changed_when: no
-
- - copy:
- content: "{{fluentd_configmap.stdout}}"
- dest: "{{mktemp.stdout}}/templates/logging-fluentd-configmap.yaml"
- when: fluentd_configmap.stdout is defined
- changed_when: no
- check_mode: no
-
-- block:
- - copy:
- src: fluent.conf
- dest: "{{mktemp.stdout}}/fluent-mux.conf"
- when: fluentd_mux_config_contents is undefined
- changed_when: no
-
- - copy:
- src: secure-forward.conf
- dest: "{{mktemp.stdout}}/secure-forward-mux.conf"
- when: fluentd_mux_securefoward_contents is undefined
- changed_when: no
-
- - copy:
- content: "{{fluentd_mux_config_contents}}"
- dest: "{{mktemp.stdout}}/fluent-mux.conf"
- when: fluentd_mux_config_contents is defined
- changed_when: no
-
- - copy:
- content: "{{fluentd_mux_secureforward_contents}}"
- dest: "{{mktemp.stdout}}/secure-forward-mux.conf"
- when: fluentd_mux_secureforward_contents is defined
- changed_when: no
-
- - command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig create configmap logging-mux
- --from-file=fluent.conf={{mktemp.stdout}}/fluent-mux.conf
- --from-file=secure-forward.conf={{mktemp.stdout}}/secure-forward-mux.conf -o yaml --dry-run
- register: mux_configmap
- changed_when: no
-
- - copy:
- content: "{{mux_configmap.stdout}}"
- dest: "{{mktemp.stdout}}/templates/logging-mux-configmap.yaml"
- when: mux_configmap.stdout is defined
- changed_when: no
- check_mode: no
- when: openshift_logging_use_mux
diff --git a/roles/openshift_logging/tasks/generate_deploymentconfigs.yaml b/roles/openshift_logging/tasks/generate_deploymentconfigs.yaml
deleted file mode 100644
index 8aea4e81f..000000000
--- a/roles/openshift_logging/tasks/generate_deploymentconfigs.yaml
+++ /dev/null
@@ -1,65 +0,0 @@
----
-- name: Generate kibana deploymentconfig
- template: src=kibana.j2 dest={{mktemp.stdout}}/logging-kibana-dc.yaml
- vars:
- component: kibana
- deploy_name: "logging-{{component}}"
- image: "{{openshift_logging_image_prefix}}logging-kibana:{{openshift_logging_image_version}}"
- proxy_image: "{{openshift_logging_image_prefix}}logging-auth-proxy:{{openshift_logging_image_version}}"
- es_host: logging-es
- es_port: "{{openshift_logging_es_port}}"
- check_mode: no
- changed_when: no
-
-- name: Generate OPS kibana deploymentconfig
- template: src=kibana.j2 dest={{mktemp.stdout}}/logging-kibana-ops-dc.yaml
- vars:
- component: kibana-ops
- deploy_name: "logging-{{component}}"
- image: "{{openshift_logging_image_prefix}}logging-kibana:{{openshift_logging_image_version}}"
- proxy_image: "{{openshift_logging_image_prefix}}logging-auth-proxy:{{openshift_logging_image_version}}"
- es_host: logging-es-ops
- es_port: "{{openshift_logging_es_ops_port}}"
- check_mode: no
- changed_when: no
-
-- name: Generate elasticsearch deploymentconfig
- template: src=es.j2 dest={{mktemp.stdout}}/logging-es-dc.yaml
- vars:
- component: es
- deploy_name_prefix: "logging-{{component}}"
- deploy_name: "{{deploy_name_prefix}}-abc123"
- image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
- es_cluster_name: "{{component}}"
- check_mode: no
- changed_when: no
-
-- name: Generate OPS elasticsearch deploymentconfig
- template: src=es.j2 dest={{mktemp.stdout}}/logging-es-ops-dc.yaml
- vars:
- component: es-ops
- deploy_name_prefix: "logging-{{component}}"
- deploy_name: "{{deploy_name_prefix}}-abc123"
- image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
- es_cluster_name: "{{component}}"
- check_mode: no
- changed_when: no
-
-- name: Generate curator deploymentconfig
- template: src=curator.j2 dest={{mktemp.stdout}}/logging-curator-dc.yaml
- vars:
- component: curator
- deploy_name: "logging-{{component}}"
- image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}"
- check_mode: no
- changed_when: no
-
-- name: Generate OPS curator deploymentconfig
- template: src=curator.j2 dest={{mktemp.stdout}}/logging-curator-ops-dc.yaml
- vars:
- component: curator-ops
- deploy_name: "logging-{{component}}"
- image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}"
- openshift_logging_es_host: logging-es-ops
- check_mode: no
- changed_when: no
diff --git a/roles/openshift_logging/tasks/generate_pvcs.yaml b/roles/openshift_logging/tasks/generate_pvcs.yaml
deleted file mode 100644
index fa7a86c27..000000000
--- a/roles/openshift_logging/tasks/generate_pvcs.yaml
+++ /dev/null
@@ -1,47 +0,0 @@
----
-- name: Init pool of PersistentVolumeClaim names
- set_fact: es_pvc_pool={{es_pvc_pool|default([]) + [pvc_name]}}
- vars:
- pvc_name: "{{es_pvc_prefix}}-{{item| int}}"
- start: "{{es_pvc_names | map('regex_search', es_pvc_prefix+'.*')|select('string')|list|length}}"
- with_sequence: start={{start}} end={{ (start|int > es_cluster_size|int - 1) | ternary(start, es_cluster_size|int - 1)}}
- when:
- - "{{ es_dc_names|default([]) | length <= es_cluster_size|int }}"
- - es_pvc_size | search('^\d.*')
- check_mode: no
-
-- name: Generating PersistentVolumeClaims
- template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml
- vars:
- obj_name: "{{claim_name}}"
- size: "{{es_pvc_size}}"
- access_modes: "{{ es_access_modes | list }}"
- pv_selector: "{{es_pv_selector}}"
- with_items:
- - "{{es_pvc_pool | default([])}}"
- loop_control:
- loop_var: claim_name
- when:
- - not es_pvc_dynamic
- - es_pvc_pool is defined
- check_mode: no
- changed_when: no
-
-- name: Generating PersistentVolumeClaims - Dynamic
- template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml
- vars:
- obj_name: "{{claim_name}}"
- annotations:
- volume.alpha.kubernetes.io/storage-class: "dynamic"
- size: "{{es_pvc_size}}"
- access_modes: "{{ es_access_modes | list }}"
- pv_selector: "{{es_pv_selector}}"
- with_items:
- - "{{es_pvc_pool|default([])}}"
- loop_control:
- loop_var: claim_name
- when:
- - es_pvc_dynamic
- - es_pvc_pool is defined
- check_mode: no
- changed_when: no
diff --git a/roles/openshift_logging/tasks/generate_rolebindings.yaml b/roles/openshift_logging/tasks/generate_rolebindings.yaml
deleted file mode 100644
index 7dc9530df..000000000
--- a/roles/openshift_logging/tasks/generate_rolebindings.yaml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-- name: Generate RoleBindings
- template: src=rolebinding.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-rolebinding.yaml
- vars:
- obj_name: logging-elasticsearch-view-role
- roleRef:
- name: view
- subjects:
- - kind: ServiceAccount
- name: aggregated-logging-elasticsearch
- check_mode: no
- changed_when: no
diff --git a/roles/openshift_logging/tasks/generate_routes.yaml b/roles/openshift_logging/tasks/generate_routes.yaml
deleted file mode 100644
index ae9a8e023..000000000
--- a/roles/openshift_logging/tasks/generate_routes.yaml
+++ /dev/null
@@ -1,169 +0,0 @@
----
-- set_fact: kibana_key={{ lookup('file', openshift_logging_kibana_key) | b64encode }}
- when: openshift_logging_kibana_key | trim | length > 0
- changed_when: false
-
-- set_fact: kibana_cert={{ lookup('file', openshift_logging_kibana_cert)| b64encode }}
- when: openshift_logging_kibana_cert | trim | length > 0
- changed_when: false
-
-- set_fact: kibana_ca={{ lookup('file', openshift_logging_kibana_ca)| b64encode }}
- when: openshift_logging_kibana_ca | trim | length > 0
- changed_when: false
-
-- set_fact: kibana_ca={{key_pairs | entry_from_named_pair('ca_file') }}
- when: kibana_ca is not defined
- changed_when: false
-
-- name: Generating logging routes
- template: src=route_reencrypt.j2 dest={{mktemp.stdout}}/templates/logging-logging-kibana-route.yaml
- tags: routes
- vars:
- obj_name: "logging-kibana"
- route_host: "{{openshift_logging_kibana_hostname}}"
- service_name: "logging-kibana"
- tls_key: "{{kibana_key | default('') | b64decode}}"
- tls_cert: "{{kibana_cert | default('') | b64decode}}"
- tls_ca_cert: "{{kibana_ca | b64decode}}"
- tls_dest_ca_cert: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}"
- edge_term_policy: "{{openshift_logging_kibana_edge_term_policy | default('') }}"
- labels:
- component: support
- logging-infra: support
- provider: openshift
- changed_when: no
-
-- set_fact: kibana_ops_key={{ lookup('file', openshift_logging_kibana_ops_key) | b64encode }}
- when:
- - openshift_logging_use_ops | bool
- - "{{ openshift_logging_kibana_ops_key | trim | length > 0 }}"
- changed_when: false
-
-- set_fact: kibana_ops_cert={{ lookup('file', openshift_logging_kibana_ops_cert)| b64encode }}
- when:
- - openshift_logging_use_ops | bool
- - "{{openshift_logging_kibana_ops_cert | trim | length > 0}}"
- changed_when: false
-
-- set_fact: kibana_ops_ca={{ lookup('file', openshift_logging_kibana_ops_ca)| b64encode }}
- when:
- - openshift_logging_use_ops | bool
- - "{{openshift_logging_kibana_ops_ca | trim | length > 0}}"
- changed_when: false
-
-- set_fact: kibana_ops_ca={{key_pairs | entry_from_named_pair('ca_file') }}
- when:
- - openshift_logging_use_ops | bool
- - kibana_ops_ca is not defined
- changed_when: false
-
-- name: Generating logging ops routes
- template: src=route_reencrypt.j2 dest={{mktemp.stdout}}/templates/logging-logging-kibana-ops-route.yaml
- tags: routes
- vars:
- obj_name: "logging-kibana-ops"
- route_host: "{{openshift_logging_kibana_ops_hostname}}"
- service_name: "logging-kibana-ops"
- tls_key: "{{kibana_ops_key | default('') | b64decode}}"
- tls_cert: "{{kibana_ops_cert | default('') | b64decode}}"
- tls_ca_cert: "{{kibana_ops_ca | b64decode}}"
- tls_dest_ca_cert: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}"
- edge_term_policy: "{{openshift_logging_kibana_edge_term_policy | default('') }}"
- labels:
- component: support
- logging-infra: support
- provider: openshift
- when: openshift_logging_use_ops | bool
- changed_when: no
-
-- set_fact: es_key={{ lookup('file', openshift_logging_es_key) | b64encode }}
- when:
- - openshift_logging_es_key | trim | length > 0
- - openshift_logging_es_allow_external | bool
- changed_when: false
-
-- set_fact: es_cert={{ lookup('file', openshift_logging_es_cert)| b64encode }}
- when:
- - openshift_logging_es_cert | trim | length > 0
- - openshift_logging_es_allow_external | bool
- changed_when: false
-
-- set_fact: es_ca={{ lookup('file', openshift_logging_es_ca_ext)| b64encode }}
- when:
- - openshift_logging_es_ca_ext | trim | length > 0
- - openshift_logging_es_allow_external | bool
- changed_when: false
-
-- set_fact: es_ca={{key_pairs | entry_from_named_pair('ca_file') }}
- when:
- - es_ca is not defined
- - openshift_logging_es_allow_external | bool
- changed_when: false
-
-- name: Generating Elasticsearch logging routes
- template: src=route_reencrypt.j2 dest={{mktemp.stdout}}/templates/logging-logging-es-route.yaml
- tags: routes
- vars:
- obj_name: "logging-es"
- route_host: "{{openshift_logging_es_hostname}}"
- service_name: "logging-es"
- tls_key: "{{es_key | default('') | b64decode}}"
- tls_cert: "{{es_cert | default('') | b64decode}}"
- tls_ca_cert: "{{es_ca | b64decode}}"
- tls_dest_ca_cert: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}"
- edge_term_policy: "{{openshift_logging_es_edge_term_policy | default('') }}"
- labels:
- component: support
- logging-infra: support
- provider: openshift
- changed_when: no
- when: openshift_logging_es_allow_external | bool
-
-- set_fact: es_ops_key={{ lookup('file', openshift_logging_es_ops_key) | b64encode }}
- when:
- - openshift_logging_es_ops_allow_external | bool
- - openshift_logging_use_ops | bool
- - "{{ openshift_logging_es_ops_key | trim | length > 0 }}"
- changed_when: false
-
-- set_fact: es_ops_cert={{ lookup('file', openshift_logging_es_ops_cert)| b64encode }}
- when:
- - openshift_logging_es_ops_allow_external | bool
- - openshift_logging_use_ops | bool
- - "{{openshift_logging_es_ops_cert | trim | length > 0}}"
- changed_when: false
-
-- set_fact: es_ops_ca={{ lookup('file', openshift_logging_es_ops_ca_ext)| b64encode }}
- when:
- - openshift_logging_es_ops_allow_external | bool
- - openshift_logging_use_ops | bool
- - "{{openshift_logging_es_ops_ca_ext | trim | length > 0}}"
- changed_when: false
-
-- set_fact: es_ops_ca={{key_pairs | entry_from_named_pair('ca_file') }}
- when:
- - openshift_logging_es_ops_allow_external | bool
- - openshift_logging_use_ops | bool
- - es_ops_ca is not defined
- changed_when: false
-
-- name: Generating Elasticsearch logging ops routes
- template: src=route_reencrypt.j2 dest={{mktemp.stdout}}/templates/logging-logging-es-ops-route.yaml
- tags: routes
- vars:
- obj_name: "logging-es-ops"
- route_host: "{{openshift_logging_es_ops_hostname}}"
- service_name: "logging-es-ops"
- tls_key: "{{es_ops_key | default('') | b64decode}}"
- tls_cert: "{{es_ops_cert | default('') | b64decode}}"
- tls_ca_cert: "{{es_ops_ca | b64decode}}"
- tls_dest_ca_cert: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}"
- edge_term_policy: "{{openshift_logging_es_ops_edge_term_policy | default('') }}"
- labels:
- component: support
- logging-infra: support
- provider: openshift
- when:
- - openshift_logging_es_ops_allow_external | bool
- - openshift_logging_use_ops | bool
- changed_when: no
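
The set_fact chains in this deleted file implement a precedence rule: a user-supplied cert, key, or CA wins whenever its path variable is non-empty, and a following task guarded by "is not defined" falls back to the CA generated for the deployment. The same precedence can be collapsed into a single conditional expression, sketched here for the Kibana CA (an illustration only, not what the role did; entry_from_named_pair is the role's own filter):

    - set_fact:
        kibana_ca: >-
          {{ lookup('file', openshift_logging_kibana_ca) | b64encode
             if openshift_logging_kibana_ca | trim | length > 0
             else key_pairs | entry_from_named_pair('ca_file') }}
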
diff --git a/roles/openshift_logging/tasks/generate_secrets.yaml b/roles/openshift_logging/tasks/generate_secrets.yaml
deleted file mode 100644
index b629bd995..000000000
--- a/roles/openshift_logging/tasks/generate_secrets.yaml
+++ /dev/null
@@ -1,129 +0,0 @@
----
-- name: Retrieving the cert to use when generating secrets for the logging components
- slurp: src="{{generated_certs_dir}}/{{item.file}}"
- register: key_pairs
- with_items:
- - { name: "ca_file", file: "ca.crt" }
- - { name: "kibana_key", file: "system.logging.kibana.key"}
- - { name: "kibana_cert", file: "system.logging.kibana.crt"}
- - { name: "curator_key", file: "system.logging.curator.key"}
- - { name: "curator_cert", file: "system.logging.curator.crt"}
- - { name: "fluentd_key", file: "system.logging.fluentd.key"}
- - { name: "fluentd_cert", file: "system.logging.fluentd.crt"}
- - { name: "kibana_internal_key", file: "kibana-internal.key"}
- - { name: "kibana_internal_cert", file: "kibana-internal.crt"}
- - { name: "server_tls", file: "server-tls.json"}
-
-- name: Generating secrets for logging components
- template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml
- vars:
- secret_name: "logging-{{component}}"
- secret_key_file: "{{component}}_key"
- secret_cert_file: "{{component}}_cert"
- secrets:
- - {key: ca, value: "{{key_pairs | entry_from_named_pair('ca_file')| b64decode }}"}
- - {key: key, value: "{{key_pairs | entry_from_named_pair(secret_key_file)| b64decode }}"}
- - {key: cert, value: "{{key_pairs | entry_from_named_pair(secret_cert_file)| b64decode }}"}
- secret_keys: ["ca", "cert", "key"]
- with_items:
- - kibana
- - curator
- - fluentd
- loop_control:
- loop_var: component
- check_mode: no
- changed_when: no
-
-- name: Retrieving the cert to use when generating secrets for mux
- slurp: src="{{generated_certs_dir}}/{{item.file}}"
- register: mux_key_pairs
- with_items:
- - { name: "ca_file", file: "ca.crt" }
- - { name: "mux_key", file: "system.logging.mux.key"}
- - { name: "mux_cert", file: "system.logging.mux.crt"}
- - { name: "mux_shared_key", file: "mux_shared_key"}
- when: openshift_logging_use_mux
-
-- name: Generating secrets for mux
- template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml
- vars:
- secret_name: "logging-{{component}}"
- secret_key_file: "{{component}}_key"
- secret_cert_file: "{{component}}_cert"
- secrets:
- - {key: ca, value: "{{mux_key_pairs | entry_from_named_pair('ca_file')| b64decode }}"}
- - {key: key, value: "{{mux_key_pairs | entry_from_named_pair(secret_key_file)| b64decode }}"}
- - {key: cert, value: "{{mux_key_pairs | entry_from_named_pair(secret_cert_file)| b64decode }}"}
- - {key: shared_key, value: "{{mux_key_pairs | entry_from_named_pair('mux_shared_key')| b64decode }}"}
- secret_keys: ["ca", "cert", "key", "shared_key"]
- with_items:
- - mux
- loop_control:
- loop_var: component
- check_mode: no
- changed_when: no
- when: openshift_logging_use_mux
-
-- name: Generating secrets for kibana proxy
- template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml
- vars:
- secret_name: logging-kibana-proxy
- secrets:
- - {key: oauth-secret, value: "{{oauth_secret}}"}
- - {key: session-secret, value: "{{session_secret}}"}
- - {key: server-key, value: "{{kibana_key_file}}"}
- - {key: server-cert, value: "{{kibana_cert_file}}"}
- - {key: server-tls.json, value: "{{server_tls_file}}"}
- secret_keys: ["server-tls.json", "server-key", "session-secret", "oauth-secret", "server-cert"]
- kibana_key_file: "{{key_pairs | entry_from_named_pair('kibana_internal_key')| b64decode }}"
- kibana_cert_file: "{{key_pairs | entry_from_named_pair('kibana_internal_cert')| b64decode }}"
- server_tls_file: "{{key_pairs | entry_from_named_pair('server_tls')| b64decode }}"
- check_mode: no
- changed_when: no
-
-- name: Generating secrets for elasticsearch
- command: >
- {{openshift.common.client_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig secrets new {{secret_name}}
- key={{generated_certs_dir}}/logging-es.jks truststore={{generated_certs_dir}}/truststore.jks
- searchguard.key={{generated_certs_dir}}/elasticsearch.jks searchguard.truststore={{generated_certs_dir}}/truststore.jks
- admin-key={{generated_certs_dir}}/system.admin.key admin-cert={{generated_certs_dir}}/system.admin.crt
- admin-ca={{generated_certs_dir}}/ca.crt admin.jks={{generated_certs_dir}}/system.admin.jks -o yaml
- vars:
- secret_name: logging-elasticsearch
- secret_keys: ["admin-cert", "searchguard.key", "admin-ca", "key", "truststore", "admin-key", "searchguard.truststore"]
- register: logging_es_secret
- check_mode: no
- changed_when: no
-
-- copy: content="{{logging_es_secret.stdout}}" dest={{mktemp.stdout}}/templates/logging-elasticsearch-secret.yaml
- when: logging_es_secret.stdout is defined
- check_mode: no
- changed_when: no
-
-- name: Retrieving the cert to use when generating secrets for Elasticsearch external route
- slurp: src="{{generated_certs_dir}}/{{item.file}}"
- register: es_key_pairs
- with_items:
- - { name: "ca_file", file: "ca.crt" }
- - { name: "es_key", file: "system.logging.es.key"}
- - { name: "es_cert", file: "system.logging.es.crt"}
- when: openshift_logging_es_allow_external | bool
-
-- name: Generating secrets for Elasticsearch external route
- template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{secret_name}}-secret.yaml
- vars:
- secret_name: "logging-{{component}}"
- secret_key_file: "{{component}}_key"
- secret_cert_file: "{{component}}_cert"
- secrets:
- - {key: ca, value: "{{es_key_pairs | entry_from_named_pair('ca_file')| b64decode }}"}
- - {key: key, value: "{{es_key_pairs | entry_from_named_pair(secret_key_file)| b64decode }}"}
- - {key: cert, value: "{{es_key_pairs | entry_from_named_pair(secret_cert_file)| b64decode }}"}
- secret_keys: ["ca", "cert", "key"]
- with_items:
- - es
- loop_control:
- loop_var: component
- check_mode: no
- changed_when: no
- when: openshift_logging_es_allow_external | bool
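
The recurring pattern in this deleted file: slurp reads each cert from disk in a loop, the registered variable keeps one result per loop item, and the role's entry_from_named_pair filter plugin pulls a named entry back out by matching item.name. With stock Jinja2 filters the same lookup could be sketched as (an illustration; slurp content is base64, hence the trailing b64decode):

    - set_fact:
        ca_content: >-
          {{ key_pairs.results
             | selectattr('item.name', 'equalto', 'ca_file')
             | map(attribute='content')
             | first | b64decode }}
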
diff --git a/roles/openshift_logging/tasks/generate_serviceaccounts.yaml b/roles/openshift_logging/tasks/generate_serviceaccounts.yaml
deleted file mode 100644
index 21bcdfecb..000000000
--- a/roles/openshift_logging/tasks/generate_serviceaccounts.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- name: Generating serviceaccounts
- template: src=serviceaccount.j2 dest={{mktemp.stdout}}/templates/logging-{{component}}-sa.yaml
- vars:
- obj_name: aggregated-logging-{{component}}
- with_items:
- - elasticsearch
- - kibana
- - fluentd
- - curator
- loop_control:
- loop_var: component
- check_mode: no
- changed_when: no
diff --git a/roles/openshift_logging/tasks/generate_services.yaml b/roles/openshift_logging/tasks/generate_services.yaml
deleted file mode 100644
index e3a5c5eb3..000000000
--- a/roles/openshift_logging/tasks/generate_services.yaml
+++ /dev/null
@@ -1,119 +0,0 @@
----
-- name: Generating logging-es service
- template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-svc.yaml
- vars:
- obj_name: logging-es
- ports:
- - {port: 9200, targetPort: restapi}
- labels:
- logging-infra: support
- selector:
- provider: openshift
- component: es
- check_mode: no
- changed_when: no
-
-- name: Generating logging-es-cluster service
- template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-cluster-svc.yaml
- vars:
- obj_name: logging-es-cluster
- ports:
- - {port: 9300}
- labels:
- logging-infra: support
- selector:
- provider: openshift
- component: es
- check_mode: no
- changed_when: no
-
-- name: Generating logging-kibana service
- template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-kibana-svc.yaml
- vars:
- obj_name: logging-kibana
- ports:
- - {port: 443, targetPort: oaproxy}
- labels:
- logging-infra: support
- selector:
- provider: openshift
- component: kibana
- check_mode: no
- changed_when: no
-
-- name: Generating logging-es-ops service
- template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-ops-svc.yaml
- vars:
- obj_name: logging-es-ops
- ports:
- - {port: 9200, targetPort: restapi}
- labels:
- logging-infra: support
- selector:
- provider: openshift
- component: es-ops
- when: openshift_logging_use_ops | bool
- check_mode: no
- changed_when: no
-
-- name: Generating logging-es-ops-cluster service
- template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-es-ops-cluster-svc.yaml
- vars:
- obj_name: logging-es-ops-cluster
- ports:
- - {port: 9300}
- labels:
- logging-infra: support
- selector:
- provider: openshift
- component: es-ops
- when: openshift_logging_use_ops | bool
- check_mode: no
- changed_when: no
-
-- name: Generating logging-kibana-ops service
- template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-kibana-ops-svc.yaml
- vars:
- obj_name: logging-kibana-ops
- ports:
- - {port: 443, targetPort: oaproxy}
- labels:
- logging-infra: support
- selector:
- provider: openshift
- component: kibana-ops
- when: openshift_logging_use_ops | bool
- check_mode: no
- changed_when: no
-
-- name: Generating logging-mux service for external connections
- template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-mux-svc.yaml
- vars:
- obj_name: logging-mux
- ports:
- - {port: "{{openshift_logging_mux_port}}", targetPort: mux-forward, name: mux-forward}
- labels:
- logging-infra: support
- selector:
- provider: openshift
- component: mux
- externalIPs:
- - "{{ ansible_eth0.ipv4.address }}"
- check_mode: no
- changed_when: no
- when: openshift_logging_mux_allow_external
-
-- name: Generating logging-mux service for intra-cluster connections
- template: src=service.j2 dest={{mktemp.stdout}}/templates/logging-mux-svc.yaml
- vars:
- obj_name: logging-mux
- ports:
- - {port: "{{openshift_logging_mux_port}}", targetPort: mux-forward, name: mux-forward}
- labels:
- logging-infra: support
- selector:
- provider: openshift
- component: mux
- check_mode: no
- changed_when: no
- when: openshift_logging_use_mux and not openshift_logging_mux_allow_external
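
The two mux service tasks render to the same destination file under complementary conditions, so exactly one definition survives. The external variant pins externalIPs to ansible_eth0.ipv4.address, which assumes the node's primary interface is named eth0; a less interface-dependent choice would be the default-route fact, sketched here (an alternative, not part of this change):

    - set_fact:
        mux_external_ip: "{{ ansible_default_ipv4.address }}"
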
diff --git a/roles/openshift_logging/tasks/install_curator.yaml b/roles/openshift_logging/tasks/install_curator.yaml
deleted file mode 100644
index ab8e207f1..000000000
--- a/roles/openshift_logging/tasks/install_curator.yaml
+++ /dev/null
@@ -1,53 +0,0 @@
----
-- name: Check Curator current replica count
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-curator
- -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}}
- register: curator_replica_count
- when: not ansible_check_mode
- ignore_errors: yes
- changed_when: no
-
-- name: Check Curator ops current replica count
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-curator-ops
- -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}}
- register: curator_ops_replica_count
- when:
- - not ansible_check_mode
- - openshift_logging_use_ops | bool
- ignore_errors: yes
- changed_when: no
-
-- name: Generate curator deploymentconfig
- template: src=curator.j2 dest={{mktemp.stdout}}/templates/logging-curator-dc.yaml
- vars:
- component: curator
- logging_component: curator
- deploy_name: "logging-{{component}}"
- image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}"
- es_host: logging-es
- es_port: "{{openshift_logging_es_port}}"
- curator_cpu_limit: "{{openshift_logging_curator_cpu_limit }}"
- curator_memory_limit: "{{openshift_logging_curator_memory_limit }}"
- replicas: "{{curator_replica_count.stdout | default (0)}}"
- curator_node_selector: "{{openshift_logging_curator_nodeselector | default({})}}"
- check_mode: no
- changed_when: no
-
-- name: Generate OPS curator deploymentconfig
- template: src=curator.j2 dest={{mktemp.stdout}}/templates/logging-curator-ops-dc.yaml
- vars:
- component: curator-ops
- logging_component: curator
- deploy_name: "logging-{{component}}"
- image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}"
- es_host: logging-es-ops
- es_port: "{{openshift_logging_es_ops_port}}"
- curator_cpu_limit: "{{openshift_logging_curator_ops_cpu_limit }}"
- curator_memory_limit: "{{openshift_logging_curator_ops_memory_limit }}"
- replicas: "{{curator_ops_replica_count.stdout | default (0)}}"
- curator_node_selector: "{{openshift_logging_curator_ops_nodeselector | default({}) }}"
- when: openshift_logging_use_ops | bool
- check_mode: no
- changed_when: no
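
The replica-count dance at the top of this deleted file kept reinstalls from scaling a running deployment back down: the live DC's .spec.replicas is read back and fed into the template, with default(0) covering first installs (where the oc get fails and is ignored) and check mode (where the command task is skipped). install_kibana.yaml and install_mux.yaml below follow the identical pattern. The same read could be done with the repo's oc_obj module used elsewhere in this diff (a sketch, not part of this change):

    - oc_obj:
        state: list
        kind: dc
        name: logging-curator
        namespace: "{{ openshift_logging_namespace }}"
      register: curator_dc
      # replicas: "{{ curator_dc.results.results[0].spec.replicas | default(0) }}"
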
diff --git a/roles/openshift_logging/tasks/install_elasticsearch.yaml b/roles/openshift_logging/tasks/install_elasticsearch.yaml
deleted file mode 100644
index a981e7f7f..000000000
--- a/roles/openshift_logging/tasks/install_elasticsearch.yaml
+++ /dev/null
@@ -1,118 +0,0 @@
----
-- name: Getting current ES deployment size
- set_fact: openshift_logging_current_es_size={{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | length }}
-
-- set_fact: openshift_logging_es_pvc_prefix="logging-es"
- when: not openshift_logging_es_pvc_prefix or openshift_logging_es_pvc_prefix == ''
-
-- set_fact: es_indices={{ es_indices | default([]) + [item | int - 1] }}
- with_sequence: count={{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count }}
-
-### evaluate whether the PVC attached to the DC currently matches the provided vars
-## if it does, we reuse that PVC in the DC
-- include: set_es_storage.yaml
- vars:
- es_component: es
- es_name: "{{ deployment.0 }}"
- es_spec: "{{ deployment.1 }}"
- es_pvc_count: "{{ deployment.2 | int }}"
- es_node_selector: "{{ openshift_logging_es_nodeselector | default({}) }}"
- es_pvc_names_count: "{{ openshift_logging_facts.elasticsearch.pvcs.keys() | count }}"
- es_pvc_size: "{{ openshift_logging_es_pvc_size }}"
- es_pvc_prefix: "{{ openshift_logging_es_pvc_prefix }}"
- es_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic | bool }}"
- es_pv_selector: "{{ openshift_logging_es_pv_selector }}"
- es_cpu_limit: "{{ openshift_logging_es_cpu_limit }}"
- es_memory_limit: "{{ openshift_logging_es_memory_limit }}"
- with_together:
- - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() }}"
- - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.values() }}"
- - "{{ es_indices | default([]) }}"
- loop_control:
- loop_var: deployment
-## if it does not, we create one that does and attach it
-
-## create a new dc/pvc if needed
-- include: set_es_storage.yaml
- vars:
- es_component: es
- es_name: "logging-es-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
- es_spec: "{}"
- es_pvc_count: "{{ item | int - 1 }}"
- es_node_selector: "{{ openshift_logging_es_nodeselector | default({}) }}"
- es_pvc_names_count: "{{ [openshift_logging_facts.elasticsearch.pvcs.keys() | count, openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count] | max }}"
- es_pvc_size: "{{ openshift_logging_es_pvc_size }}"
- es_pvc_prefix: "{{ openshift_logging_es_pvc_prefix }}"
- es_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic | bool }}"
- es_pv_selector: "{{ openshift_logging_es_pv_selector }}"
- es_cpu_limit: "{{ openshift_logging_es_cpu_limit }}"
- es_memory_limit: "{{ openshift_logging_es_memory_limit }}"
- with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_facts.elasticsearch.deploymentconfigs | count }}
-
-# --------- Tasks for Operations clusters ---------
-
-- name: Getting current ES deployment size
- set_fact: openshift_logging_current_es_ops_size={{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | length }}
-
-- set_fact: openshift_logging_es_ops_pvc_prefix="{{ openshift_logging_es_ops_pvc_prefix | default('logging-es-ops') }}"
-
-- name: Validate Elasticsearch cluster size for Ops
- fail: msg="The openshift_logging_es_ops_cluster_size may not be scaled down more than 1 less (or 0) the number of Elasticsearch nodes already deployed"
- vars:
- es_dcs: "{{openshift_logging_facts.elasticsearch_ops.deploymentconfigs}}"
- cluster_size: "{{openshift_logging_es_ops_cluster_size|int}}"
- when:
- - openshift_logging_use_ops | bool
- - "{{es_dcs | length - openshift_logging_es_ops_cluster_size|int | abs > 1}}"
- check_mode: no
-
-- set_fact: openshift_logging_es_ops_pvc_prefix="logging-es-ops"
- when: not openshift_logging_es_ops_pvc_prefix or openshift_logging_es_ops_pvc_prefix == ''
-
-- set_fact: es_ops_indices={{ es_ops_indices | default([]) + [item | int - 1] }}
- with_sequence: count={{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count }}
- when:
- - openshift_logging_use_ops | bool
-
-- include: set_es_storage.yaml
- vars:
- es_component: es-ops
- es_name: "{{ deployment.0 }}"
- es_spec: "{{ deployment.1 }}"
- es_pvc_count: "{{ deployment.2 | int }}"
- es_node_selector: "{{ openshift_logging_es_ops_nodeselector | default({}) }}"
- es_pvc_names_count: "{{ openshift_logging_facts.elasticsearch_ops.pvcs.keys() | count }}"
- es_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
- es_pvc_prefix: "{{ openshift_logging_es_ops_pvc_prefix }}"
- es_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic | bool }}"
- es_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
- es_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
- es_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
- with_together:
- - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() }}"
- - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.values() }}"
- - "{{ es_ops_indices | default([]) }}"
- loop_control:
- loop_var: deployment
- when:
- - openshift_logging_use_ops | bool
-## if it does not, we create one that does and attach it
-
-## create a new dc/pvc if needed
-- include: set_es_storage.yaml
- vars:
- es_component: es-ops
- es_name: "logging-es-ops-{{'abcdefghijklmnopqrstuvwxyz0123456789'|random_word(8)}}"
- es_spec: "{}"
- es_pvc_count: "{{ item | int - 1 }}"
- es_node_selector: "{{ openshift_logging_es_ops_nodeselector | default({}) }}"
- es_pvc_names_count: "{{ [openshift_logging_facts.elasticsearch_ops.pvcs.keys() | count, openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count] | max }}"
- es_pvc_size: "{{ openshift_logging_es_ops_pvc_size }}"
- es_pvc_prefix: "{{ openshift_logging_es_ops_pvc_prefix }}"
- es_pvc_dynamic: "{{ openshift_logging_es_ops_pvc_dynamic | bool }}"
- es_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
- es_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
- es_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
- with_sequence: count={{ openshift_logging_es_ops_cluster_size | int - openshift_logging_facts.elasticsearch_ops.deploymentconfigs | count }}
- when:
- - openshift_logging_use_ops | bool
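
Two indexing details in this deleted file are easy to misread: with_sequence counts from 1, so "item | int - 1" accumulates a zero-based index list with one entry per existing deploymentconfig, and the second set_es_storage include only runs for the shortfall between the desired cluster size and the DCs already present. With assumed values:

    # with_sequence count=3 yields "1", "2", "3"; es_indices becomes [0, 1, 2]
    # desired size 5 with 3 existing DCs -> with_sequence count=2 new DCs
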
diff --git a/roles/openshift_logging/tasks/install_fluentd.yaml b/roles/openshift_logging/tasks/install_fluentd.yaml
deleted file mode 100644
index 6bc405819..000000000
--- a/roles/openshift_logging/tasks/install_fluentd.yaml
+++ /dev/null
@@ -1,54 +0,0 @@
----
-- set_fact: fluentd_ops_host={{ (openshift_logging_use_ops | bool) | ternary(openshift_logging_es_ops_host, openshift_logging_es_host) }}
- check_mode: no
-
-- set_fact: fluentd_ops_port={{ (openshift_logging_use_ops | bool) | ternary(openshift_logging_es_ops_port, openshift_logging_es_port) }}
- check_mode: no
-
-- name: Generating Fluentd daemonset
- template: src=fluentd.j2 dest={{mktemp.stdout}}/templates/logging-fluentd.yaml
- vars:
- daemonset_name: logging-fluentd
- daemonset_component: fluentd
- daemonset_container_name: fluentd-elasticsearch
- daemonset_serviceAccount: aggregated-logging-fluentd
- ops_host: "{{ fluentd_ops_host }}"
- ops_port: "{{ fluentd_ops_port }}"
- fluentd_nodeselector_key: "{{openshift_logging_fluentd_nodeselector.keys()[0]}}"
- fluentd_nodeselector_value: "{{openshift_logging_fluentd_nodeselector.values()[0]}}"
- check_mode: no
- changed_when: no
-
-- name: "Check fluentd privileged permissions"
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- get scc/privileged -o jsonpath='{.users}'
- register: fluentd_privileged
- check_mode: no
- changed_when: no
-
-- name: "Set privileged permissions for fluentd"
- command: >
- {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
- add-scc-to-user privileged system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
- register: fluentd_output
- failed_when: fluentd_output.rc == 1 and 'exists' not in fluentd_output.stderr
- check_mode: no
- when: fluentd_privileged.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1
-
-- name: "Check fluentd cluster-reader permissions"
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- get clusterrolebinding/cluster-readers -o jsonpath='{.userNames}'
- register: fluentd_cluster_reader
- check_mode: no
- changed_when: no
-
-- name: "Set cluster-reader permissions for fluentd"
- command: >
- {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
- add-cluster-role-to-user cluster-reader system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
- register: fluentd2_output
- failed_when: fluentd2_output.rc == 1 and 'exists' not in fluentd2_output.stderr
- check_mode: no
- when: fluentd_cluster_reader.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1
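
Both grants in this deleted file are made idempotent by hand: read the SCC's users (or the cluster-readers binding's userNames) with a jsonpath query, skip the grant when the service account already appears, and treat an "exists" error from a racing run as success via failed_when. The membership test embeds a Jinja template inside the string handed to find(); the same check reads more directly with the "in" operator (a sketch; the behavior is equivalent):

    when: >-
      ('system:serviceaccount:' ~ openshift_logging_namespace ~
       ':aggregated-logging-fluentd') not in fluentd_privileged.stdout
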
diff --git a/roles/openshift_logging/tasks/install_kibana.yaml b/roles/openshift_logging/tasks/install_kibana.yaml
deleted file mode 100644
index 52bdeb50d..000000000
--- a/roles/openshift_logging/tasks/install_kibana.yaml
+++ /dev/null
@@ -1,60 +0,0 @@
----
-- name: Check Kibana current replica count
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-kibana
- -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}}
- register: kibana_replica_count
- when: not ansible_check_mode
- ignore_errors: yes
- changed_when: no
-
-- name: Check Kibana ops current replica count
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-kibana-ops
- -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}}
- register: kibana_ops_replica_count
- when:
- - not ansible_check_mode
- - openshift_logging_use_ops | bool
- ignore_errors: yes
- changed_when: no
-
-
-- name: Generate kibana deploymentconfig
- template: src=kibana.j2 dest={{mktemp.stdout}}/templates/logging-kibana-dc.yaml
- vars:
- component: kibana
- logging_component: kibana
- deploy_name: "logging-{{component}}"
- image: "{{openshift_logging_image_prefix}}logging-kibana:{{openshift_logging_image_version}}"
- proxy_image: "{{openshift_logging_image_prefix}}logging-auth-proxy:{{openshift_logging_image_version}}"
- es_host: logging-es
- es_port: "{{openshift_logging_es_port}}"
- kibana_cpu_limit: "{{openshift_logging_kibana_cpu_limit }}"
- kibana_memory_limit: "{{openshift_logging_kibana_memory_limit }}"
- kibana_proxy_cpu_limit: "{{openshift_logging_kibana_proxy_cpu_limit }}"
- kibana_proxy_memory_limit: "{{openshift_logging_kibana_proxy_memory_limit }}"
- replicas: "{{kibana_replica_count.stdout | default (0)}}"
- kibana_node_selector: "{{openshift_logging_kibana_nodeselector | default({})}}"
- check_mode: no
- changed_when: no
-
-- name: Generate OPS kibana deploymentconfig
- template: src=kibana.j2 dest={{mktemp.stdout}}/templates/logging-kibana-ops-dc.yaml
- vars:
- component: kibana-ops
- logging_component: kibana
- deploy_name: "logging-{{component}}"
- image: "{{openshift_logging_image_prefix}}logging-kibana:{{openshift_logging_image_version}}"
- proxy_image: "{{openshift_logging_image_prefix}}logging-auth-proxy:{{openshift_logging_image_version}}"
- es_host: logging-es-ops
- es_port: "{{openshift_logging_es_ops_port}}"
- kibana_cpu_limit: "{{openshift_logging_kibana_ops_cpu_limit }}"
- kibana_memory_limit: "{{openshift_logging_kibana_ops_memory_limit }}"
- kibana_proxy_cpu_limit: "{{openshift_logging_kibana_ops_proxy_cpu_limit }}"
- kibana_proxy_memory_limit: "{{openshift_logging_kibana_ops_proxy_memory_limit }}"
- replicas: "{{kibana_ops_replica_count.stdout | default (0)}}"
- kibana_node_selector: "{{openshift_logging_kibana_ops_nodeselector | default({})}}"
- when: openshift_logging_use_ops | bool
- check_mode: no
- changed_when: no
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index aec455c22..7c1062b77 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -2,89 +2,252 @@
- name: Gather OpenShift Logging Facts
openshift_logging_facts:
oc_bin: "{{openshift.common.client_binary}}"
- admin_kubeconfig: "{{mktemp.stdout}}/admin.kubeconfig"
openshift_logging_namespace: "{{openshift_logging_namespace}}"
- tags: logging_facts
+
+- name: Set logging project
+ oc_project:
+ state: present
+ name: "{{ openshift_logging_namespace }}"
+ node_selector: "{{ openshift_logging_nodeselector | default(null) }}"
+
+- name: Labeling logging project
+ oc_label:
+ state: present
+ kind: namespace
+ name: "{{ openshift_logging_namespace }}"
+ labels:
+ - key: "{{ item.key }}"
+ value: "{{ item.value }}"
+ with_dict: "{{ openshift_logging_labels | default({}) }}"
+ when:
+ - openshift_logging_labels is defined
+ - openshift_logging_labels is dict
+
+- name: Labeling logging project
+ oc_label:
+ state: present
+ kind: namespace
+ name: "{{ openshift_logging_namespace }}"
+ labels:
+ - key: "{{ openshift_logging_label_key }}"
+ value: "{{ openshift_logging_label_value }}"
+ when:
+ - openshift_logging_label_key is defined
+ - openshift_logging_label_key != ""
+ - openshift_logging_label_value is defined
+
+- name: Create logging cert directory
+ file:
+ path: "{{ openshift.common.config_base }}/logging"
+ state: directory
+ mode: 0755
+ changed_when: False
check_mode: no
-- name: Validate Elasticsearch cluster size
- fail: msg="The openshift_logging_es_cluster_size may only be scaled down manually. Please see official documentation on how to do this."
- when: openshift_logging_facts.elasticsearch.deploymentconfigs | length > openshift_logging_es_cluster_size|int
-
-- name: Validate Elasticsearch Ops cluster size
- fail: msg="The openshift_logging_es_ops_cluster_size may only be scaled down manually. Please see official documentation on how to do this."
- when: openshift_logging_facts.elasticsearch_ops.deploymentconfigs | length > openshift_logging_es_ops_cluster_size|int
-
-- name: Install logging
- include: "{{ role_path }}/tasks/install_{{ install_component }}.yaml"
- when: openshift_hosted_logging_install | default(true) | bool
- with_items:
- - support
- - elasticsearch
- - kibana
- - curator
- - fluentd
- loop_control:
- loop_var: install_component
-
-- name: Install logging mux
- include: "{{ role_path }}/tasks/install_mux.yaml"
- when: openshift_logging_use_mux
-
-- find: paths={{ mktemp.stdout }}/templates patterns=*.yaml
- register: object_def_files
- changed_when: no
-
-- slurp: src={{item}}
- register: object_defs
- with_items: "{{object_def_files.files | map(attribute='path') | list | sort}}"
- changed_when: no
-
-- name: Create objects
- include: oc_apply.yaml
+- include: generate_certs.yaml
vars:
- - kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
- - namespace: "{{ openshift_logging_namespace }}"
- - file_name: "{{ file.source }}"
- - file_content: "{{ file.content | b64decode | from_yaml }}"
- with_items: "{{ object_defs.results }}"
- loop_control:
- loop_var: file
- when: not ansible_check_mode
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
-- include: update_master_config.yaml
+## Elasticsearch
+
+- set_fact: es_indices={{ es_indices | default([]) + [item | int - 1] }}
+ with_sequence: count={{ openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count }}
+ when: openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count > 0
+
+- set_fact: es_indices=[]
+ when: openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count == 0
+
+- set_fact: openshift_logging_es_pvc_prefix="logging-es"
+ when: openshift_logging_es_pvc_prefix == ""
+
+# We don't allow scaling down of ES nodes currently
+- include_role:
+ name: openshift_logging_elasticsearch
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+ openshift_logging_elasticsearch_deployment_name: "{{ item.0 }}"
+ openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix ~ '-' ~ item.2 if item.1 is none else item.1 }}"
+ openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}"
+
+ openshift_logging_elasticsearch_storage_type: "{{ 'pvc' if ( openshift_logging_es_pvc_dynamic | bool or openshift_hosted_logging_storage_kind | default('') == 'nfs') else 'emptydir' }}"
+ openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}"
+ openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic }}"
+ openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
+
+ with_together:
+ - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs }}"
+ - "{{ openshift_logging_facts.elasticsearch.pvcs }}"
+ - "{{ es_indices }}"
+ when:
+ - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count > 0
+
+# Create any new DC that may be required
+- include_role:
+ name: openshift_logging_elasticsearch
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+ openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix }}-{{ item | int + openshift_logging_facts.elasticsearch.deploymentconfigs | count - 1 }}"
+ openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}"
+
+ openshift_logging_elasticsearch_storage_type: "{{ 'pvc' if ( openshift_logging_es_pvc_dynamic | bool or openshift_hosted_logging_storage_kind | default('') == 'nfs') else 'emptydir' }}"
+ openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}"
+ openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic }}"
+ openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
+
+ with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count }}
+
+- set_fact: es_ops_indices={{ es_ops_indices | default([]) + [item | int - 1] }}
+ with_sequence: count={{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count }}
+ when:
+ - openshift_logging_use_ops | bool
+ - openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count > 0
+
+- set_fact: es_ops_indices=[]
+ when: openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count == 0
+
+
+- include_role:
+ name: openshift_logging_elasticsearch
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+ openshift_logging_elasticsearch_deployment_name: "{{ item.0 }}"
+ openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix ~ '-' ~ item.2 if item.1 is none else item.1 }}"
+ openshift_logging_elasticsearch_ops_deployment: true
+ openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}"
-- name: Printing out objects to create
- debug: msg={{file.content | b64decode }}
- with_items: "{{ object_defs.results }}"
- loop_control:
- loop_var: file
- when: ansible_check_mode
-
- # TODO replace task with oc_secret module that supports
- # linking when available
-- name: Link Pull Secrets With Service Accounts
- include: oc_secret.yaml
+ openshift_logging_elasticsearch_storage_type: "{{ 'pvc' if ( openshift_logging_es_pvc_dynamic | bool or openshift_hosted_logging_storage_kind | default('') == 'nfs') else 'emptydir' }}"
+ openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}"
+ openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic }}"
+ openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
+
+ with_together:
+ - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs }}"
+ - "{{ openshift_logging_facts.elasticsearch_ops.pvcs }}"
+ - "{{ es_ops_indices }}"
+ when:
+ - openshift_logging_use_ops | bool
+ - openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count > 0
+
+# Create any new DC that may be required
+- include_role:
+ name: openshift_logging_elasticsearch
vars:
- kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
- subcommand: link
- service_account: "{{sa_account}}"
- secret_name: "{{openshift_logging_image_pull_secret}}"
- add_args: "--for=pull"
- with_items:
- - default
- - aggregated-logging-elasticsearch
- - aggregated-logging-kibana
- - aggregated-logging-fluentd
- - aggregated-logging-curator
- register: link_pull_secret
- loop_control:
- loop_var: sa_account
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+ openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix }}-{{ item | int + openshift_logging_facts.elasticsearch_ops.deploymentconfigs | count - 1 }}"
+ openshift_logging_elasticsearch_ops_deployment: true
+ openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}"
+
+ openshift_logging_elasticsearch_storage_type: "{{ 'pvc' if ( openshift_logging_es_pvc_dynamic | bool or openshift_hosted_logging_storage_kind | default('') == 'nfs') else 'emptydir' }}"
+ openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}"
+ openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic }}"
+ openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
+
+ with_sequence: count={{ openshift_logging_es_ops_cluster_size | int - openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count }}
when:
- - openshift_logging_image_pull_secret is defined
- - openshift_logging_image_pull_secret != ''
- failed_when: link_pull_secret.rc != 0
+ - openshift_logging_use_ops | bool
+
+
+## Kibana
+- include_role:
+ name: openshift_logging_kibana
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+ openshift_logging_kibana_namespace: "{{ openshift_logging_namespace }}"
+ openshift_logging_kibana_master_url: "{{ openshift_logging_master_url }}"
+ openshift_logging_kibana_master_public_url: "{{ openshift_logging_master_public_url }}"
+ openshift_logging_kibana_image_prefix: "{{ openshift_logging_image_prefix }}"
+ openshift_logging_kibana_image_version: "{{ openshift_logging_image_version }}"
+ openshift_logging_kibana_replicas: "{{ openshift_logging_kibana_replica_count }}"
+ openshift_logging_kibana_es_host: "{{ openshift_logging_es_host }}"
+ openshift_logging_kibana_es_port: "{{ openshift_logging_es_port }}"
+ openshift_logging_kibana_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
+
+
+- include_role:
+ name: openshift_logging_kibana
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+ openshift_logging_kibana_ops_deployment: true
+ openshift_logging_kibana_namespace: "{{ openshift_logging_namespace }}"
+ openshift_logging_kibana_master_url: "{{ openshift_logging_master_url }}"
+ openshift_logging_kibana_master_public_url: "{{ openshift_logging_master_public_url }}"
+ openshift_logging_kibana_image_prefix: "{{ openshift_logging_image_prefix }}"
+ openshift_logging_kibana_image_version: "{{ openshift_logging_image_version }}"
+ openshift_logging_kibana_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
+ openshift_logging_kibana_es_host: "{{ openshift_logging_es_ops_host }}"
+ openshift_logging_kibana_es_port: "{{ openshift_logging_es_ops_port }}"
+ openshift_logging_kibana_nodeselector: "{{ openshift_logging_kibana_ops_nodeselector }}"
+ openshift_logging_kibana_cpu_limit: "{{ openshift_logging_kibana_ops_cpu_limit }}"
+ openshift_logging_kibana_memory_limit: "{{ openshift_logging_kibana_ops_memory_limit }}"
+ openshift_logging_kibana_hostname: "{{ openshift_logging_kibana_ops_hostname }}"
+ openshift_logging_kibana_replicas: "{{ openshift_logging_kibana_ops_replica_count }}"
+ openshift_logging_kibana_proxy_debug: "{{ openshift_logging_kibana_ops_proxy_debug }}"
+ openshift_logging_kibana_proxy_cpu_limit: "{{ openshift_logging_kibana_ops_proxy_cpu_limit }}"
+ openshift_logging_kibana_proxy_memory_limit: "{{ openshift_logging_kibana_ops_proxy_memory_limit }}"
+ openshift_logging_kibana_cert: "{{ openshift_logging_kibana_ops_cert }}"
+ openshift_logging_kibana_key: "{{ openshift_logging_kibana_ops_key }}"
+ openshift_logging_kibana_ca: "{{ openshift_logging_kibana_ops_ca}}"
+ when:
+ - openshift_logging_use_ops | bool
+
-- name: Scaling up cluster
- include: start_cluster.yaml
- when: start_cluster | default(true) | bool
+## Curator
+- include_role:
+ name: openshift_logging_curator
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+ openshift_logging_curator_namespace: "{{ openshift_logging_namespace }}"
+ openshift_logging_curator_es_host: "{{ openshift_logging_es_host }}"
+ openshift_logging_curator_es_port: "{{ openshift_logging_es_port }}"
+ openshift_logging_curator_master_url: "{{ openshift_logging_master_url }}"
+ openshift_logging_curator_image_prefix: "{{ openshift_logging_image_prefix }}"
+ openshift_logging_curator_image_version: "{{ openshift_logging_image_version }}"
+ openshift_logging_curator_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
+
+- include_role:
+ name: openshift_logging_curator
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+ openshift_logging_curator_ops_deployment: true
+ openshift_logging_curator_es_host: "{{ openshift_logging_es_ops_host }}"
+ openshift_logging_curator_es_port: "{{ openshift_logging_es_ops_port }}"
+ openshift_logging_curator_namespace: "{{ openshift_logging_namespace }}"
+ openshift_logging_curator_master_url: "{{ openshift_logging_master_url }}"
+ openshift_logging_curator_image_prefix: "{{ openshift_logging_image_prefix }}"
+ openshift_logging_curator_image_version: "{{ openshift_logging_image_version }}"
+ openshift_logging_curator_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
+ openshift_logging_curator_cpu_limit: "{{ openshift_logging_curator_ops_cpu_limit }}"
+ openshift_logging_curator_memory_limit: "{{ openshift_logging_curator_ops_memory_limit }}"
+ openshift_logging_curator_nodeselector: "{{ openshift_logging_curator_ops_nodeselector }}"
+ when:
+ - openshift_logging_use_ops | bool
+
+## Mux
+- include_role:
+ name: openshift_logging_mux
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+ openshift_logging_mux_ops_host: "{{ ( openshift_logging_use_ops | bool ) | ternary('logging-es-ops', 'logging-es') }}"
+ openshift_logging_mux_namespace: "{{ openshift_logging_namespace }}"
+ openshift_logging_mux_master_url: "{{ openshift_logging_master_url }}"
+ openshift_logging_mux_image_prefix: "{{ openshift_logging_image_prefix }}"
+ openshift_logging_mux_image_version: "{{ openshift_logging_image_version }}"
+ openshift_logging_mux_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
+ when:
+ - openshift_logging_use_mux | bool
+
+
+## Fluentd
+- include_role:
+ name: openshift_logging_fluentd
+ vars:
+ generated_certs_dir: "{{openshift.common.config_base}}/logging"
+ openshift_logging_fluentd_ops_host: "{{ ( openshift_logging_use_ops | bool ) | ternary('logging-es-ops', 'logging-es') }}"
+ openshift_logging_fluentd_use_journal: "{{ openshift.docker.options | search('journald') }}"
+ openshift_logging_fluentd_image_prefix: "{{ openshift_logging_image_prefix }}"
+ openshift_logging_fluentd_image_version: "{{ openshift_logging_image_version }}"
+ openshift_logging_fluentd_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
+ openshift_logging_fluentd_master_url: "{{ openshift_logging_master_url }}"
+ openshift_logging_fluentd_namespace: "{{ openshift_logging_namespace }}"
+
+- include: update_master_config.yaml
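
This rewrite is the core of the change: the monolithic install_*.yaml task files deleted elsewhere in this diff become per-component roles (openshift_logging_elasticsearch, _kibana, _curator, _mux, _fluentd) pulled in through include_role, with every cross-component value passed explicitly as vars. For the Elasticsearch loops, with_together zips the existing DC names, the PVC names, and the zero-based index list, padding the shorter lists with None; the pvc_name ternary then reuses an attached claim when one exists and otherwise mints prefix-index. In isolation:

    # item.0 = dc name, item.1 = existing pvc or None, item.2 = zero-based index
    openshift_logging_elasticsearch_pvc_name: >-
      {{ openshift_logging_es_pvc_prefix ~ '-' ~ item.2
         if item.1 is none else item.1 }}
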
diff --git a/roles/openshift_logging/tasks/install_mux.yaml b/roles/openshift_logging/tasks/install_mux.yaml
deleted file mode 100644
index 91eeb95a1..000000000
--- a/roles/openshift_logging/tasks/install_mux.yaml
+++ /dev/null
@@ -1,67 +0,0 @@
----
-- set_fact: mux_ops_host={{ (openshift_logging_use_ops | bool) | ternary(openshift_logging_es_ops_host, openshift_logging_es_host) }}
- check_mode: no
-
-- set_fact: mux_ops_port={{ (openshift_logging_use_ops | bool) | ternary(openshift_logging_es_ops_port, openshift_logging_es_port) }}
- check_mode: no
-
-- name: Check mux current replica count
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get dc/logging-mux
- -o jsonpath='{.spec.replicas}' -n {{openshift_logging_namespace}}
- register: mux_replica_count
- when: not ansible_check_mode
- ignore_errors: yes
- changed_when: no
-
-- name: Generating mux deploymentconfig
- template: src=mux.j2 dest={{mktemp.stdout}}/templates/logging-mux-dc.yaml
- vars:
- component: mux
- logging_component: mux
- deploy_name: "logging-{{component}}"
- image: "{{openshift_logging_image_prefix}}logging-fluentd:{{openshift_logging_image_version}}"
- es_host: logging-es
- es_port: "{{openshift_logging_es_port}}"
- ops_host: "{{ mux_ops_host }}"
- ops_port: "{{ mux_ops_port }}"
- mux_cpu_limit: "{{openshift_logging_mux_cpu_limit}}"
- mux_memory_limit: "{{openshift_logging_mux_memory_limit}}"
- replicas: "{{mux_replica_count.stdout | default (0)}}"
- mux_node_selector: "{{openshift_logging_mux_nodeselector | default({})}}"
- check_mode: no
- changed_when: no
-
-- name: "Check mux hostmount-anyuid permissions"
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- get scc/hostmount-anyuid -o jsonpath='{.users}'
- register: mux_hostmount_anyuid
- check_mode: no
- changed_when: no
-
-- name: "Set hostmount-anyuid permissions for mux"
- command: >
- {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
- add-scc-to-user hostmount-anyuid system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
- register: mux_output
- failed_when: mux_output.rc == 1 and 'exists' not in mux_output.stderr
- check_mode: no
- when: mux_hostmount_anyuid.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1
-
-- name: "Check mux cluster-reader permissions"
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig
- get clusterrolebinding/cluster-readers -o jsonpath='{.userNames}'
- register: mux_cluster_reader
- check_mode: no
- changed_when: no
-
-- name: "Set cluster-reader permissions for mux"
- command: >
- {{ openshift.common.admin_binary}} --config={{ mktemp.stdout }}/admin.kubeconfig policy
- add-cluster-role-to-user cluster-reader system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd
- register: mux2_output
- failed_when: mux2_output.rc == 1 and 'exists' not in mux2_output.stderr
- check_mode: no
- when: mux_cluster_reader.stdout.find("system:serviceaccount:{{openshift_logging_namespace}}:aggregated-logging-fluentd") == -1
diff --git a/roles/openshift_logging/tasks/install_support.yaml b/roles/openshift_logging/tasks/install_support.yaml
deleted file mode 100644
index 877ce3149..000000000
--- a/roles/openshift_logging/tasks/install_support.yaml
+++ /dev/null
@@ -1,73 +0,0 @@
----
-# This is the base configuration for installing the other components
-- name: Set logging project
- oc_project:
- state: present
- name: "{{ openshift_logging_namespace }}"
- node_selector: "{{ openshift_logging_nodeselector | default(null) }}"
-
-- name: Labelling logging project
- oc_label:
- state: present
- kind: namespace
- name: "{{ openshift_logging_namespace }}"
- labels:
- - key: "{{ item.key }}"
- value: "{{ item.value }}"
- with_dict: "{{ openshift_logging_labels | default({}) }}"
- when:
- - openshift_logging_labels is defined
- - openshift_logging_labels is dict
-
-- name: Labelling logging project
- oc_label:
- state: present
- kind: namespace
- name: "{{ openshift_logging_namespace }}"
- labels:
- - key: "{{ openshift_logging_label_key }}"
- value: "{{ openshift_logging_label_value }}"
- when:
- - openshift_logging_label_key is defined
- - openshift_logging_label_key != ""
- - openshift_logging_label_value is defined
-
-- name: Create logging cert directory
- file: path={{openshift.common.config_base}}/logging state=directory mode=0755
- changed_when: False
- check_mode: no
-
-- include: generate_certs.yaml
- vars:
- generated_certs_dir: "{{openshift.common.config_base}}/logging"
-
-- name: Create temp directory for all our templates
- file: path={{mktemp.stdout}}/templates state=directory mode=0755
- changed_when: False
- check_mode: no
-
-- include: generate_secrets.yaml
- vars:
- generated_certs_dir: "{{openshift.common.config_base}}/logging"
-
-- include: generate_configmaps.yaml
-
-- include: generate_services.yaml
-
-- name: Generate kibana-proxy oauth client
- template: src=oauth-client.j2 dest={{mktemp.stdout}}/templates/oauth-client.yaml
- vars:
- secret: "{{oauth_secret}}"
- when: oauth_secret is defined
- check_mode: no
- changed_when: no
-
-- include: generate_clusterroles.yaml
-
-- include: generate_rolebindings.yaml
-
-- include: generate_clusterrolebindings.yaml
-
-- include: generate_serviceaccounts.yaml
-
-- include: generate_routes.yaml
diff --git a/roles/openshift_logging/tasks/main.yaml b/roles/openshift_logging/tasks/main.yaml
index 3d8cd3410..f475024dd 100644
--- a/roles/openshift_logging/tasks/main.yaml
+++ b/roles/openshift_logging/tasks/main.yaml
@@ -30,33 +30,12 @@
check_mode: no
become: no
-- debug: msg="Created local temp dir {{local_tmp.stdout}}"
-
-- name: Copy the admin client config(s)
- command: >
- cp {{ openshift_master_config_dir }}/admin.kubeconfig {{ mktemp.stdout }}/admin.kubeconfig
- changed_when: False
- check_mode: no
- tags: logging_init
-
- include: "{{ role_path }}/tasks/install_logging.yaml"
when: openshift_logging_install_logging | default(false) | bool
-- include: "{{ role_path }}/tasks/upgrade_logging.yaml"
- when: openshift_logging_upgrade_logging | default(false) | bool
-
- include: "{{ role_path }}/tasks/delete_logging.yaml"
when:
- not openshift_logging_install_logging | default(false) | bool
- - not openshift_logging_upgrade_logging | default(false) | bool
-
-- name: Delete temp directory
- file:
- name: "{{ mktemp.stdout }}"
- state: absent
- tags: logging_cleanup
- changed_when: False
- check_mode: no
- name: Cleaning up local temp dir
local_action: file path="{{local_tmp.stdout}}" state=absent
diff --git a/roles/openshift_logging/tasks/oc_apply.yaml b/roles/openshift_logging/tasks/oc_apply.yaml
deleted file mode 100644
index a0ed56ebd..000000000
--- a/roles/openshift_logging/tasks/oc_apply.yaml
+++ /dev/null
@@ -1,52 +0,0 @@
----
-- oc_obj:
- kind: "{{ file_content.kind }}"
- name: "{{ file_content.metadata.name }}"
- state: present
- namespace: "{{ namespace }}"
- files:
- - "{{ file_name }}"
- when: file_content.kind not in ["Service", "Route"]
-
-## still need to do this for services until the template logic is replaced by oc_*
-- block:
- - name: Checking generation of {{file_content.kind}} {{file_content.metadata.name}}
- command: >
- {{ openshift.common.client_binary }}
- --config={{ kubeconfig }}
- get {{file_content.kind}} {{file_content.metadata.name}}
- -o jsonpath='{.metadata.resourceVersion}'
- -n {{namespace}}
- register: generation_init
- failed_when: "'not found' not in generation_init.stderr and generation_init.stdout == ''"
- changed_when: no
-
- - name: Applying {{file_name}}
- command: >
- {{ openshift.common.client_binary }} --config={{ kubeconfig }}
- apply -f {{ file_name }}
- -n {{ namespace }}
- register: generation_apply
- failed_when: "'error' in generation_apply.stderr"
- changed_when: no
-
- - name: Removing previous {{file_name}}
- command: >
- {{ openshift.common.client_binary }} --config={{ kubeconfig }}
- delete -f {{ file_name }}
- -n {{ namespace }}
- register: generation_delete
- failed_when: "'error' in generation_delete.stderr"
- changed_when: generation_delete.rc == 0
- when: "'field is immutable' in generation_apply.stderr"
-
- - name: Recreating {{file_name}}
- command: >
- {{ openshift.common.client_binary }} --config={{ kubeconfig }}
- apply -f {{ file_name }}
- -n {{ namespace }}
- register: generation_apply
- failed_when: "'error' in generation_apply.stderr"
- changed_when: generation_apply.rc == 0
- when: "'field is immutable' in generation_apply.stderr"
- when: file_content.kind in ["Service", "Route"]
diff --git a/roles/openshift_logging/tasks/oc_secret.yaml b/roles/openshift_logging/tasks/oc_secret.yaml
deleted file mode 100644
index de37e4f6d..000000000
--- a/roles/openshift_logging/tasks/oc_secret.yaml
+++ /dev/null
@@ -1,7 +0,0 @@
----
-- command: >
- {{ openshift.common.client_binary }}
- --config={{ kubeconfig }}
- secret {{subcommand}} {{service_account}} {{secret_name}}
- {{add_args}}
- -n {{openshift_logging_namespace}}
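
This shim only interpolated an oc secrets subcommand; its one caller, removed from install_logging.yaml above, linked the image pull secret to each logging service account. The rendered command looked like (service account "default" shown as an example):

    oc --config={{ kubeconfig }} secrets link default \
       {{ openshift_logging_image_pull_secret }} --for=pull \
       -n {{ openshift_logging_namespace }}
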
diff --git a/roles/openshift_logging/tasks/procure_server_certs.yaml b/roles/openshift_logging/tasks/procure_server_certs.yaml
index 7ab140357..00de0ca06 100644
--- a/roles/openshift_logging/tasks/procure_server_certs.yaml
+++ b/roles/openshift_logging/tasks/procure_server_certs.yaml
@@ -27,7 +27,7 @@
- name: Creating signed server cert and key for {{ cert_info.procure_component }}
command: >
- {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig ca create-server-cert
+ {{ openshift.common.client_binary }} adm --config={{ mktemp.stdout }}/admin.kubeconfig ca create-server-cert
--key={{generated_certs_dir}}/{{cert_info.procure_component}}.key --cert={{generated_certs_dir}}/{{cert_info.procure_component}}.crt
--hostnames={{cert_info.hostnames|quote}} --signer-cert={{generated_certs_dir}}/ca.crt --signer-key={{generated_certs_dir}}/ca.key
--signer-serial={{generated_certs_dir}}/ca.serial.txt
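
The only change to this file swaps the retired standalone admin binary for the equivalent oc subcommand, since oadm functionality was folded into the client as "oc adm":

    # before: {{ openshift.common.admin_binary }} ca create-server-cert ...
    # after:  {{ openshift.common.client_binary }} adm ca create-server-cert ...
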
diff --git a/roles/openshift_logging/tasks/set_es_storage.yaml b/roles/openshift_logging/tasks/set_es_storage.yaml
deleted file mode 100644
index 4afe4e641..000000000
--- a/roles/openshift_logging/tasks/set_es_storage.yaml
+++ /dev/null
@@ -1,80 +0,0 @@
----
-- set_fact: es_storage_type="{{ es_spec.volumes['elasticsearch-storage'] }}"
- when: es_spec.volumes is defined
-
-- set_fact: es_storage_claim="{{ es_spec.volumes['elasticsearch-storage'].persistentVolumeClaim.claimName }}"
- when:
- - es_spec.volumes is defined
- - es_storage_type.persistentVolumeClaim is defined
-
-- set_fact: es_storage_claim=""
- when:
- - not es_spec.volumes is defined or not es_storage_type.persistentVolumeClaim is defined
-
-## take an ES dc and evaluate its storage option
-# if it is a hostmount or emptydir we don't do anything with it
-# if it's a PVC we check whether the corresponding PVC matches the provided specs (if they exist)
-- oc_obj:
- state: list
- kind: pvc
- name: "{{ es_storage_claim }}"
- namespace: "{{ openshift_logging_namespace }}"
- register: pvc_spec
- failed_when: pvc_spec.results.stderr is defined
- when:
- - es_spec.volumes is defined
- - es_storage_type.persistentVolumeClaim is defined
-
-- set_fact: pvc_size="{{ pvc_spec.results.results[0].spec.resources.requests.storage }}"
- when:
- - pvc_spec.results is defined
- - pvc_spec.results.results[0].spec is defined
-
-# if not, create the PVC and use it
-- block:
-
- - name: Generating PersistentVolumeClaims
- template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml
- vars:
- obj_name: "{{ es_pvc_prefix }}-{{ es_pvc_names_count | int + es_pvc_count | int }}"
- size: "{{ es_pvc_size }}"
- access_modes: "{{ openshift_logging_storage_access_modes }}"
- pv_selector: "{{ es_pv_selector }}"
- when: not es_pvc_dynamic | bool
- check_mode: no
- changed_when: no
-
- - name: Generating PersistentVolumeClaims - Dynamic
- template: src=pvc.j2 dest={{mktemp.stdout}}/templates/logging-{{obj_name}}-pvc.yaml
- vars:
- obj_name: "{{ es_pvc_prefix }}-{{ es_pvc_names_count | int + es_pvc_count | int }}"
- annotations:
- volume.alpha.kubernetes.io/storage-class: "dynamic"
- size: "{{ es_pvc_size }}"
- access_modes: "{{ openshift_logging_storage_access_modes }}"
- pv_selector: "{{ es_pv_selector }}"
- when: es_pvc_dynamic | bool
- check_mode: no
- changed_when: no
-
- - set_fact: es_storage_claim="{{ es_pvc_prefix }}-{{ es_pvc_names_count | int + es_pvc_count | int }}"
-
- when:
- - es_pvc_size | search('^\d.*')
- - not es_spec.volumes is defined or not es_storage_claim | search( es_pvc_prefix ) or ( not pvc_size | search( es_pvc_size ) and not es_pvc_size | search( pvc_size ) )
-
-- name: Generate Elasticsearch DeploymentConfig
- template: src=es.j2 dest={{mktemp.stdout}}/templates/logging-{{deploy_name}}-dc.yaml
- vars:
- component: "{{ es_component }}"
- deploy_name: "{{ es_name }}"
- logging_component: elasticsearch
- deploy_name_prefix: "logging-{{ es_component }}"
- image: "{{openshift_logging_image_prefix}}logging-elasticsearch:{{openshift_logging_image_version}}"
- es_cluster_name: "{{component}}"
- es_cpu_limit: "{{ es_cpu_limit }}"
- es_memory_limit: "{{ es_memory_limit }}"
- es_node_selector: "{{ es_node_selector }}"
- es_storage: "{{ openshift_logging_facts | es_storage( es_name, es_storage_claim ) }}"
- check_mode: no
- changed_when: no
diff --git a/roles/openshift_logging/tasks/start_cluster.yaml b/roles/openshift_logging/tasks/start_cluster.yaml
deleted file mode 100644
index c1592b830..000000000
--- a/roles/openshift_logging/tasks/start_cluster.yaml
+++ /dev/null
@@ -1,156 +0,0 @@
----
-- name: Retrieve list of fluentd hosts
- oc_obj:
- state: list
- kind: node
- when: "'--all' in openshift_logging_fluentd_hosts"
- register: fluentd_hosts
-
-- name: Set fact openshift_logging_fluentd_hosts
- set_fact:
- openshift_logging_fluentd_hosts: "{{ fluentd_hosts.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- when: "'--all' in openshift_logging_fluentd_hosts"
-
-- name: start fluentd
- oc_label:
- name: "{{ fluentd_host }}"
- kind: node
- state: add
- labels: "{{ openshift_logging_fluentd_nodeselector | oo_dict_to_list_of_dict }}"
- with_items: "{{ openshift_logging_fluentd_hosts }}"
- loop_control:
- loop_var: fluentd_host
-
-- name: Retrieve mux
- oc_obj:
- state: list
- kind: dc
- selector: "component=mux"
- namespace: "{{openshift_logging_namespace}}"
- register: mux_dc
- when: openshift_logging_use_mux
-
-- name: start mux
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: "{{ openshift_logging_mux_replica_count | default (1) }}"
- with_items: "{{ mux_dc.results.results[0]['items'] | map(attribute='metadata.name') | list if 'results' in mux_dc else [] }}"
- loop_control:
- loop_var: object
- when:
- - mux_dc.results is defined
- - mux_dc.results.results is defined
- - openshift_logging_use_mux
-
-- name: Retrieve elasticsearch
- oc_obj:
- state: list
- kind: dc
- selector: "component=es"
- namespace: "{{openshift_logging_namespace}}"
- register: es_dc
-
-- name: start elasticsearch
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: 1
- with_items: "{{ es_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- loop_control:
- loop_var: object
-
-- name: Retrieve kibana
- oc_obj:
- state: list
- kind: dc
- selector: "component=kibana"
- namespace: "{{openshift_logging_namespace}}"
- register: kibana_dc
-
-- name: start kibana
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: "{{ openshift_logging_kibana_replica_count | default (1) }}"
- with_items: "{{ kibana_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- loop_control:
- loop_var: object
-
-- name: Retrieve curator
- oc_obj:
- state: list
- kind: dc
- selector: "component=curator"
- namespace: "{{openshift_logging_namespace}}"
- register: curator_dc
-
-- name: start curator
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: 1
- with_items: "{{ curator_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- loop_control:
- loop_var: object
-
-- name: Retrieve elasticsearch-ops
- oc_obj:
- state: list
- kind: dc
- selector: "component=es-ops"
- namespace: "{{openshift_logging_namespace}}"
- register: es_dc
-
-- name: start elasticsearch-ops
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: 1
- with_items: "{{ es_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- loop_control:
- loop_var: object
- when: openshift_logging_use_ops | bool
-
-- name: Retrieve kibana-ops
- oc_obj:
- state: list
- kind: dc
- selector: "component=kibana-ops"
- namespace: "{{openshift_logging_namespace}}"
- register: kibana_dc
-
-- name: start kibana-ops
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: "{{ openshift_logging_kibana_ops_replica_count | default (1) }}"
- with_items: "{{ kibana_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- loop_control:
- loop_var: object
- when: openshift_logging_use_ops | bool
-
-- name: Retrieve curator
- oc_obj:
- state: list
- kind: dc
- selector: "component=curator-ops"
- namespace: "{{openshift_logging_namespace}}"
- register: curator_dc
-
-- name: start curator-ops
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: 1
- with_items: "{{ curator_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- loop_control:
- loop_var: object
- when: openshift_logging_use_ops | bool
diff --git a/roles/openshift_logging/tasks/stop_cluster.yaml b/roles/openshift_logging/tasks/stop_cluster.yaml
deleted file mode 100644
index f4b419d84..000000000
--- a/roles/openshift_logging/tasks/stop_cluster.yaml
+++ /dev/null
@@ -1,153 +0,0 @@
----
-- name: Retrieve list of fluentd hosts
- oc_obj:
- state: list
- kind: node
- when: "'--all' in openshift_logging_fluentd_hosts"
- register: fluentd_hosts
-
-- name: Set fact openshift_logging_fluentd_hosts
- set_fact:
- openshift_logging_fluentd_hosts: "{{ fluentd_hosts.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- when: "'--all' in openshift_logging_fluentd_hosts"
-
-- name: stop fluentd
- oc_label:
- name: "{{ fluentd_host }}"
- kind: node
- state: absent
- labels: "{{ openshift_logging_fluentd_nodeselector | oo_dict_to_list_of_dict }}"
- with_items: "{{ openshift_logging_fluentd_hosts }}"
- loop_control:
- loop_var: fluentd_host
-
-- name: Retrieve mux
- oc_obj:
- state: list
- kind: dc
- selector: "component=mux"
- namespace: "{{openshift_logging_namespace}}"
- register: mux_dc
- when: openshift_logging_use_mux
-
-- name: stop mux
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: 0
- with_items: "{{ mux_dc.results.results[0]['items'] | map(attribute='metadata.name') | list if 'results' in mux_dc else [] }}"
- loop_control:
- loop_var: object
- when: openshift_logging_use_mux
-
-- name: Retrieve elasticsearch
- oc_obj:
- state: list
- kind: dc
- selector: "component=es"
- namespace: "{{openshift_logging_namespace}}"
- register: es_dc
-
-- name: stop elasticsearch
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: 0
- with_items: "{{ es_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- loop_control:
- loop_var: object
-
-- name: Retrieve kibana
- oc_obj:
- state: list
- kind: dc
- selector: "component=kibana"
- namespace: "{{openshift_logging_namespace}}"
- register: kibana_dc
-
-- name: stop kibana
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: 0
- with_items: "{{ kibana_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- loop_control:
- loop_var: object
-
-- name: Retrieve curator
- oc_obj:
- state: list
- kind: dc
- selector: "component=curator"
- namespace: "{{openshift_logging_namespace}}"
- register: curator_dc
-
-- name: stop curator
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: 0
- with_items: "{{ curator_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- loop_control:
- loop_var: object
-
-- name: Retrieve elasticsearch-ops
- oc_obj:
- state: list
- kind: dc
- selector: "component=es-ops"
- namespace: "{{openshift_logging_namespace}}"
- register: es_dc
-
-- name: stop elasticsearch-ops
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: 0
- with_items: "{{ es_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- loop_control:
- loop_var: object
- when: openshift_logging_use_ops | bool
-
-- name: Retrieve kibana-ops
- oc_obj:
- state: list
- kind: dc
- selector: "component=kibana-ops"
- namespace: "{{openshift_logging_namespace}}"
- register: kibana_dc
-
-- name: stop kibana-ops
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: 0
- with_items: "{{ kibana_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- loop_control:
- loop_var: object
- when: openshift_logging_use_ops | bool
-
-- name: Retrieve curator
- oc_obj:
- state: list
- kind: dc
- selector: "component=curator-ops"
- namespace: "{{openshift_logging_namespace}}"
- register: curator_dc
-
-- name: stop curator-ops
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: 0
- with_items: "{{ curator_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- loop_control:
- loop_var: object
- when: openshift_logging_use_ops | bool
diff --git a/roles/openshift_logging/tasks/upgrade_logging.yaml b/roles/openshift_logging/tasks/upgrade_logging.yaml
deleted file mode 100644
index 30fdbd2af..000000000
--- a/roles/openshift_logging/tasks/upgrade_logging.yaml
+++ /dev/null
@@ -1,48 +0,0 @@
----
-- name: Stop the Cluster
- include: stop_cluster.yaml
-
-- name: Upgrade logging
- include: install_logging.yaml
- vars:
- start_cluster: False
-
-# start ES so that we can run migrate script
-- name: Retrieve elasticsearch
- oc_obj:
- state: list
- kind: dc
- selector: "component=es"
- namespace: "{{openshift_logging_namespace}}"
- register: es_dc
-
-- name: start elasticsearch
- oc_scale:
- kind: dc
- name: "{{ object }}"
- namespace: "{{openshift_logging_namespace}}"
- replicas: 1
- with_items: "{{ es_dc.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
- loop_control:
- loop_var: object
-
-- name: Wait for pods to start
- oc_obj:
- state: list
- kind: pods
- selector: "component=es"
- namespace: "{{openshift_logging_namespace}}"
- register: running_pod
- until: running_pod.results.results[0]['items'] | selectattr('status.phase', 'match', '^Running$') | map(attribute='metadata.name') | list | length != 0
- retries: 30
- delay: 10
-
-- name: Run upgrade script
- script: es_migration.sh {{openshift.common.config_base}}/logging/ca.crt {{openshift.common.config_base}}/logging/system.admin.key {{openshift.common.config_base}}/logging/system.admin.crt {{openshift_logging_es_host}} {{openshift_logging_es_port}} {{openshift_logging_namespace}}
- register: script_output
- changed_when:
- - script_output.rc == 0
- - script_output.stdout.find("skipping update_for_uuid") == -1 or script_output.stdout.find("skipping update_for_common_data_model") == -1
-
-- name: Start up rest of cluster
- include: start_cluster.yaml
diff --git a/roles/openshift_logging/templates/clusterrole.j2 b/roles/openshift_logging/templates/clusterrole.j2
deleted file mode 100644
index 0d28db48e..000000000
--- a/roles/openshift_logging/templates/clusterrole.j2
+++ /dev/null
@@ -1,21 +0,0 @@
-apiVersion: v1
-kind: ClusterRole
-metadata:
- name: {{obj_name}}
-rules:
-{% for rule in rules %}
-- resources:
-{% for kind in rule.resources %}
- - {{ kind }}
-{% endfor %}
- apiGroups:
-{% if rule.api_groups is defined %}
-{% for group in rule.api_groups %}
- - {{ group }}
-{% endfor %}
-{% endif %}
- verbs:
-{% for verb in rule.verbs %}
- - {{ verb }}
-{% endfor %}
-{% endfor %}
diff --git a/roles/openshift_logging/templates/clusterrolebinding.j2 b/roles/openshift_logging/templates/clusterrolebinding.j2
deleted file mode 100644
index 2d25ff1fb..000000000
--- a/roles/openshift_logging/templates/clusterrolebinding.j2
+++ /dev/null
@@ -1,24 +0,0 @@
-apiVersion: v1
-kind: ClusterRoleBinding
-metadata:
- name: {{obj_name}}
-{% if crb_usernames is defined %}
-userNames:
-{% for name in crb_usernames %}
- - {{ name }}
-{% endfor %}
-{% endif %}
-{% if crb_groupnames is defined %}
-groupNames:
-{% for name in crb_groupnames %}
- - {{ name }}
-{% endfor %}
-{% endif %}
-subjects:
-{% for sub in subjects %}
- - kind: {{ sub.kind }}
- name: {{ sub.name }}
- namespace: {{sub.namespace}}
-{% endfor %}
-roleRef:
- name: {{obj_name}}
diff --git a/roles/openshift_logging/templates/es-storage-emptydir.partial b/roles/openshift_logging/templates/es-storage-emptydir.partial
deleted file mode 100644
index ccd01a816..000000000
--- a/roles/openshift_logging/templates/es-storage-emptydir.partial
+++ /dev/null
@@ -1 +0,0 @@
- emptyDir: {}
diff --git a/roles/openshift_logging/templates/es-storage-hostpath.partial b/roles/openshift_logging/templates/es-storage-hostpath.partial
deleted file mode 100644
index 07ddad9ba..000000000
--- a/roles/openshift_logging/templates/es-storage-hostpath.partial
+++ /dev/null
@@ -1,2 +0,0 @@
- hostPath:
- path: {{es_storage['path']}}
diff --git a/roles/openshift_logging/templates/es-storage-pvc.partial b/roles/openshift_logging/templates/es-storage-pvc.partial
deleted file mode 100644
index fcbff68de..000000000
--- a/roles/openshift_logging/templates/es-storage-pvc.partial
+++ /dev/null
@@ -1,2 +0,0 @@
- persistentVolumeClaim:
- claimName: {{es_storage['pvc_claim']}}
diff --git a/roles/openshift_logging/templates/fluentd.j2 b/roles/openshift_logging/templates/fluentd.j2
deleted file mode 100644
index 5c93d823e..000000000
--- a/roles/openshift_logging/templates/fluentd.j2
+++ /dev/null
@@ -1,167 +0,0 @@
-apiVersion: extensions/v1beta1
-kind: "DaemonSet"
-metadata:
- name: "{{daemonset_name}}"
- labels:
- provider: openshift
- component: "{{daemonset_component}}"
- logging-infra: "{{daemonset_component}}"
-spec:
- selector:
- matchLabels:
- provider: openshift
- component: "{{daemonset_component}}"
- updateStrategy:
- type: RollingUpdate
- rollingUpdate:
- minReadySeconds: 600
- template:
- metadata:
- name: "{{daemonset_container_name}}"
- labels:
- logging-infra: "{{daemonset_component}}"
- provider: openshift
- component: "{{daemonset_component}}"
- spec:
- serviceAccountName: "{{daemonset_serviceAccount}}"
- nodeSelector:
- {{fluentd_nodeselector_key}}: "{{fluentd_nodeselector_value}}"
- containers:
- - name: "{{daemonset_container_name}}"
- image: "{{openshift_logging_image_prefix}}{{daemonset_name}}:{{openshift_logging_image_version}}"
- imagePullPolicy: Always
- securityContext:
- privileged: true
- resources:
- limits:
- cpu: {{openshift_logging_fluentd_cpu_limit}}
- memory: {{openshift_logging_fluentd_memory_limit}}
- volumeMounts:
- - name: runlogjournal
- mountPath: /run/log/journal
- - name: varlog
- mountPath: /var/log
- - name: varlibdockercontainers
- mountPath: /var/lib/docker/containers
- readOnly: true
- - name: config
- mountPath: /etc/fluent/configs.d/user
- readOnly: true
- - name: certs
- mountPath: /etc/fluent/keys
- readOnly: true
- - name: dockerhostname
- mountPath: /etc/docker-hostname
- readOnly: true
- - name: localtime
- mountPath: /etc/localtime
- readOnly: true
- - name: dockercfg
- mountPath: /etc/sysconfig/docker
- readOnly: true
- - name: dockerdaemoncfg
- mountPath: /etc/docker
- readOnly: true
-{% if openshift_logging_use_mux_client | bool %}
- - name: muxcerts
- mountPath: /etc/fluent/muxkeys
- readOnly: true
-{% endif %}
- env:
- - name: "K8S_HOST_URL"
- value: "{{openshift_logging_master_url}}"
- - name: "ES_HOST"
- value: "{{openshift_logging_es_host}}"
- - name: "ES_PORT"
- value: "{{openshift_logging_es_port}}"
- - name: "ES_CLIENT_CERT"
- value: "{{openshift_logging_es_client_cert}}"
- - name: "ES_CLIENT_KEY"
- value: "{{openshift_logging_es_client_key}}"
- - name: "ES_CA"
- value: "{{openshift_logging_es_ca}}"
- - name: "OPS_HOST"
- value: "{{ops_host}}"
- - name: "OPS_PORT"
- value: "{{ops_port}}"
- - name: "OPS_CLIENT_CERT"
- value: "{{openshift_logging_es_ops_client_cert}}"
- - name: "OPS_CLIENT_KEY"
- value: "{{openshift_logging_es_ops_client_key}}"
- - name: "OPS_CA"
- value: "{{openshift_logging_es_ops_ca}}"
- - name: "ES_COPY"
- value: "{{openshift_logging_fluentd_es_copy|lower}}"
- - name: "ES_COPY_HOST"
- value: "{{es_copy_host | default('')}}"
- - name: "ES_COPY_PORT"
- value: "{{es_copy_port | default('')}}"
- - name: "ES_COPY_SCHEME"
- value: "{{es_copy_scheme | default('https')}}"
- - name: "ES_COPY_CLIENT_CERT"
- value: "{{es_copy_client_cert | default('')}}"
- - name: "ES_COPY_CLIENT_KEY"
- value: "{{es_copy_client_key | default('')}}"
- - name: "ES_COPY_CA"
- value: "{{es_copy_ca | default('')}}"
- - name: "ES_COPY_USERNAME"
- value: "{{es_copy_username | default('')}}"
- - name: "ES_COPY_PASSWORD"
- value: "{{es_copy_password | default('')}}"
- - name: "OPS_COPY_HOST"
- value: "{{ops_copy_host | default('')}}"
- - name: "OPS_COPY_PORT"
- value: "{{ops_copy_port | default('')}}"
- - name: "OPS_COPY_SCHEME"
- value: "{{ops_copy_scheme | default('https')}}"
- - name: "OPS_COPY_CLIENT_CERT"
- value: "{{ops_copy_client_cert | default('')}}"
- - name: "OPS_COPY_CLIENT_KEY"
- value: "{{ops_copy_client_key | default('')}}"
- - name: "OPS_COPY_CA"
- value: "{{ops_copy_ca | default('')}}"
- - name: "OPS_COPY_USERNAME"
- value: "{{ops_copy_username | default('')}}"
- - name: "OPS_COPY_PASSWORD"
- value: "{{ops_copy_password | default('')}}"
- - name: "USE_JOURNAL"
- value: "{{openshift_logging_fluentd_use_journal|lower}}"
- - name: "JOURNAL_SOURCE"
- value: "{{openshift_logging_fluentd_journal_source | default('')}}"
- - name: "JOURNAL_READ_FROM_HEAD"
- value: "{{openshift_logging_fluentd_journal_read_from_head|lower}}"
- - name: "USE_MUX_CLIENT"
- value: "{{openshift_logging_use_mux_client| default('false')}}"
- volumes:
- - name: runlogjournal
- hostPath:
- path: /run/log/journal
- - name: varlog
- hostPath:
- path: /var/log
- - name: varlibdockercontainers
- hostPath:
- path: /var/lib/docker/containers
- - name: config
- configMap:
- name: logging-fluentd
- - name: certs
- secret:
- secretName: logging-fluentd
- - name: dockerhostname
- hostPath:
- path: /etc/hostname
- - name: localtime
- hostPath:
- path: /etc/localtime
- - name: dockercfg
- hostPath:
- path: /etc/sysconfig/docker
- - name: dockerdaemoncfg
- hostPath:
- path: /etc/docker
-{% if openshift_logging_use_mux_client | bool %}
- - name: muxcerts
- secret:
- secretName: logging-mux
-{% endif %}
diff --git a/roles/openshift_logging/templates/secret.j2 b/roles/openshift_logging/templates/secret.j2
deleted file mode 100644
index eba4197da..000000000
--- a/roles/openshift_logging/templates/secret.j2
+++ /dev/null
@@ -1,9 +0,0 @@
-apiVersion: v1
-kind: Secret
-metadata:
- name: "{{secret_name}}"
-type: Opaque
-data:
-{% for s in secrets %}
- "{{s.key}}" : "{{s.value | b64encode}}"
-{% endfor %}
diff --git a/roles/openshift_logging/templates/service.j2 b/roles/openshift_logging/templates/service.j2
deleted file mode 100644
index 70644a39c..000000000
--- a/roles/openshift_logging/templates/service.j2
+++ /dev/null
@@ -1,34 +0,0 @@
-apiVersion: "v1"
-kind: "Service"
-metadata:
- name: "{{obj_name}}"
-{% if labels is defined%}
- labels:
-{% for key, value in labels.iteritems() %}
- {{key}}: {{value}}
-{% endfor %}
-{% endif %}
-spec:
- ports:
-{% for port in ports %}
- -
-{% for key, value in port.iteritems() %}
- {{key}}: {{value}}
-{% endfor %}
-{% if port.targetPort is undefined %}
- clusterIP: "None"
-{% endif %}
-{% endfor %}
-{% if service_targetPort is defined %}
- targetPort: {{service_targetPort}}
-{% endif %}
- selector:
- {% for key, value in selector.iteritems() %}
- {{key}}: {{value}}
- {% endfor %}
-{% if externalIPs is defined -%}
- externalIPs:
-{% for ip in externalIPs %}
- - {{ ip }}
-{% endfor %}
-{% endif %}
diff --git a/roles/openshift_logging/templates/serviceaccount.j2 b/roles/openshift_logging/templates/serviceaccount.j2
deleted file mode 100644
index b22acc594..000000000
--- a/roles/openshift_logging/templates/serviceaccount.j2
+++ /dev/null
@@ -1,16 +0,0 @@
-apiVersion: v1
-kind: ServiceAccount
-metadata:
- name: {{obj_name}}
-{% if labels is defined%}
- labels:
-{% for key, value in labels.iteritems() %}
- {{key}}: {{value}}
-{% endfor %}
-{% endif %}
-{% if secrets is defined %}
-secrets:
-{% for name in secrets %}
-- name: {{ name }}
-{% endfor %}
-{% endif %}
diff --git a/roles/openshift_logging_curator/defaults/main.yml b/roles/openshift_logging_curator/defaults/main.yml
new file mode 100644
index 000000000..82ffb2f93
--- /dev/null
+++ b/roles/openshift_logging_curator/defaults/main.yml
@@ -0,0 +1,33 @@
+---
+### General logging settings
+openshift_logging_curator_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}"
+openshift_logging_curator_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}"
+openshift_logging_curator_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"
+openshift_logging_curator_master_url: "https://kubernetes.default.svc.cluster.local"
+
+openshift_logging_curator_namespace: logging
+
+### Common settings
+openshift_logging_curator_nodeselector: ""
+openshift_logging_curator_cpu_limit: 100m
+openshift_logging_curator_memory_limit: null
+
+openshift_logging_curator_es_host: "logging-es"
+openshift_logging_curator_es_port: 9200
+
+# This should not exceed 1; we should add validation for this.
+openshift_logging_curator_replicas: 1
+
+# this determines whether this is an operations (ops) deployment or a non-ops
+# deployment; it is used only for naming purposes
+openshift_logging_curator_ops_deployment: false
+
+openshift_logging_curator_default_days: 30
+openshift_logging_curator_run_hour: 0
+openshift_logging_curator_run_minute: 0
+openshift_logging_curator_run_timezone: UTC
+openshift_logging_curator_script_log_level: INFO
+openshift_logging_curator_log_level: ERROR
+
+# The following can be uncommented to provide values for configmaps -- take care when providing file contents, as incorrect contents may leave your cluster inoperable.
+#curator_config_contents:
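
All of the defaults above can be overridden from the inventory or group_vars of the calling play. A hypothetical sketch tightening the Curator schedule and retention; the variable names come from the defaults above, the values are illustrative:

    openshift_logging_curator_default_days: 14     # prune indices older than two weeks
    openshift_logging_curator_run_hour: 3          # run the nightly job at 03:30
    openshift_logging_curator_run_minute: 30
    openshift_logging_curator_run_timezone: UTC
    openshift_logging_curator_memory_limit: 256Mi  # the default above leaves this null
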
diff --git a/roles/openshift_logging/files/curator.yml b/roles/openshift_logging_curator/files/curator.yml
index 8d62d8e7d..8d62d8e7d 100644
--- a/roles/openshift_logging/files/curator.yml
+++ b/roles/openshift_logging_curator/files/curator.yml
diff --git a/roles/openshift_logging_curator/meta/main.yaml b/roles/openshift_logging_curator/meta/main.yaml
new file mode 100644
index 000000000..6752fb7f9
--- /dev/null
+++ b/roles/openshift_logging_curator/meta/main.yaml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: OpenShift Red Hat
+ description: OpenShift Aggregated Logging Curator Component
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: lib_openshift
diff --git a/roles/openshift_logging_curator/tasks/determine_version.yaml b/roles/openshift_logging_curator/tasks/determine_version.yaml
new file mode 100644
index 000000000..94f8b4a97
--- /dev/null
+++ b/roles/openshift_logging_curator/tasks/determine_version.yaml
@@ -0,0 +1,17 @@
+---
+# TODO: consider making this a module instead
+- fail:
+ msg: Missing version to install; provide one via 'openshift_logging_image_version'
+ when: not openshift_logging_image_version or openshift_logging_image_version == ''
+
+- set_fact:
+ curator_version: "{{ __latest_curator_version }}"
+ when: openshift_logging_image_version == 'latest'
+
+# should we just assume that we will have the correct major version?
+- set_fact: curator_version="{{ openshift_logging_image_version | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}"
+ when: openshift_logging_image_version != 'latest'
+
+- fail:
+ msg: Invalid version specified for Curator
+ when: curator_version not in __allowed_curator_versions
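
The regex_replace above collapses any image tag of the form v<major>.<minor>... into a 3_<minor> key that is then checked against __allowed_curator_versions. A worked sketch; the set_fact/debug pair is illustrative only:

    - set_fact:
        demo_version: "{{ 'v3.5.5.1' | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}"
    - debug:
        msg: "{{ demo_version }}"  # prints 3_5

Note that the replacement discards the captured major version and hardcodes the '3_' prefix, so the mapping only holds while the major version stays at 3.
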
diff --git a/roles/openshift_logging_curator/tasks/main.yaml b/roles/openshift_logging_curator/tasks/main.yaml
new file mode 100644
index 000000000..ae7e48caa
--- /dev/null
+++ b/roles/openshift_logging_curator/tasks/main.yaml
@@ -0,0 +1,113 @@
+---
+- include: determine_version.yaml
+
+# allow passing in a tempdir
+- name: Create temp directory for doing work in
+ command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+
+- set_fact:
+ tempdir: "{{ mktemp.stdout }}"
+
+# This may not be necessary in this role
+- name: Create templates subdirectory
+ file:
+ state: directory
+ path: "{{ tempdir }}/templates"
+ mode: 0755
+ changed_when: False
+
+# we want to make sure we have all the necessary components here
+
+# service account
+- name: Create Curator service account
+ oc_serviceaccount:
+ state: present
+ name: "aggregated-logging-curator"
+ namespace: "{{ openshift_logging_namespace }}"
+ image_pull_secrets: "{{ openshift_logging_image_pull_secret }}"
+ when: openshift_logging_image_pull_secret != ''
+
+- name: Create Curator service account
+ oc_serviceaccount:
+ state: present
+ name: "aggregated-logging-curator"
+ namespace: "{{ openshift_logging_namespace }}"
+ when:
+ - openshift_logging_image_pull_secret == ''
+
+# configmap
+- copy:
+ src: curator.yml
+ dest: "{{ tempdir }}/curator.yml"
+ when: curator_config_contents is undefined
+ changed_when: no
+
+- copy:
+ content: "{{ curator_config_contents }}"
+ dest: "{{ tempdir }}/curator.yml"
+ when: curator_config_contents is defined
+ changed_when: no
+
+- name: Set Curator configmap
+ oc_configmap:
+ state: present
+ name: "logging-curator"
+ namespace: "{{ openshift_logging_namespace }}"
+ from_file:
+ config.yaml: "{{ tempdir }}/curator.yml"
+
+# secret
+- name: Set Curator secret
+ oc_secret:
+ state: present
+ name: "logging-curator"
+ namespace: "{{ openshift_logging_namespace }}"
+ files:
+ - name: ca
+ path: "{{ generated_certs_dir }}/ca.crt"
+ - name: key
+ path: "{{ generated_certs_dir }}/system.logging.curator.key"
+ - name: cert
+ path: "{{ generated_certs_dir }}/system.logging.curator.crt"
+
+- set_fact:
+ curator_name: "{{ 'logging-curator' ~ ( (openshift_logging_curator_ops_deployment | default(false) | bool) | ternary('-ops', '') ) }}"
+ curator_component: "{{ 'curator' ~ ( (openshift_logging_curator_ops_deployment | default(false) | bool) | ternary('-ops', '') ) }}"
+
+# DC
+# TODO: scale should not exceed 1
+- name: Generate Curator deploymentconfig
+ template:
+ src: curator.j2
+ dest: "{{ tempdir }}/templates/curator-dc.yaml"
+ vars:
+ component: "{{ curator_component }}"
+ logging_component: curator
+ deploy_name: "{{ curator_name }}"
+ image: "{{openshift_logging_image_prefix}}logging-curator:{{openshift_logging_image_version}}"
+ es_host: "{{ openshift_logging_curator_es_host }}"
+ es_port: "{{ openshift_logging_curator_es_port }}"
+ curator_cpu_limit: "{{ openshift_logging_curator_cpu_limit }}"
+ curator_memory_limit: "{{ openshift_logging_curator_memory_limit }}"
+ replicas: "{{ openshift_logging_curator_replicas | default (1) }}"
+ curator_node_selector: "{{openshift_logging_curator_nodeselector | default({})}}"
+ check_mode: no
+ changed_when: no
+
+- name: Set Curator DC
+ oc_obj:
+ state: present
+ name: "{{ curator_name }}"
+ namespace: "{{ openshift_logging_namespace }}"
+ kind: dc
+ files:
+ - "{{ tempdir }}/templates/curator-dc.yaml"
+ delete_after: true
+
+- name: Delete temp directory
+ file:
+ name: "{{ tempdir }}"
+ state: absent
+ changed_when: False
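
The role assumes the calling play supplies the shared logging facts. A hypothetical invocation sketch; all values are illustrative, and the variable names are the ones consumed by the tasks above:

    - include_role:
        name: openshift_logging_curator
      vars:
        openshift_logging_namespace: logging
        generated_certs_dir: /etc/origin/logging         # where the signed certs were produced
        openshift_logging_image_prefix: docker.io/openshift/origin-
        openshift_logging_image_version: v3.6
        openshift_logging_image_pull_secret: ''
        openshift_logging_curator_ops_deployment: false  # true deploys the -ops variant
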
diff --git a/roles/openshift_logging/templates/curator.j2 b/roles/openshift_logging_curator/templates/curator.j2
index c6284166b..1bf9b9de2 100644
--- a/roles/openshift_logging/templates/curator.j2
+++ b/roles/openshift_logging_curator/templates/curator.j2
@@ -7,7 +7,7 @@ metadata:
component: "{{component}}"
logging-infra: "{{logging_component}}"
spec:
- replicas: {{replicas|default(0)}}
+ replicas: {{replicas|default(1)}}
selector:
provider: openshift
component: "{{component}}"
@@ -42,13 +42,13 @@ spec:
resources:
limits:
cpu: "{{curator_cpu_limit}}"
-{% if curator_memory_limit is defined and curator_memory_limit is not none %}
+{% if curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "" %}
memory: "{{curator_memory_limit}}"
{% endif %}
env:
-
name: "K8S_HOST_URL"
- value: "{{openshift_logging_master_url}}"
+ value: "{{openshift_logging_curator_master_url}}"
-
name: "ES_HOST"
value: "{{es_host}}"
diff --git a/roles/openshift_logging_curator/vars/main.yml b/roles/openshift_logging_curator/vars/main.yml
new file mode 100644
index 000000000..97525479e
--- /dev/null
+++ b/roles/openshift_logging_curator/vars/main.yml
@@ -0,0 +1,3 @@
+---
+__latest_curator_version: "3_5"
+__allowed_curator_versions: ["3_5", "3_6"]
diff --git a/roles/openshift_logging_elasticsearch/defaults/main.yml b/roles/openshift_logging_elasticsearch/defaults/main.yml
new file mode 100644
index 000000000..c0b5d394e
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/defaults/main.yml
@@ -0,0 +1,57 @@
+---
+### Common settings
+openshift_logging_elasticsearch_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}"
+openshift_logging_elasticsearch_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}"
+openshift_logging_elasticsearch_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"
+openshift_logging_elasticsearch_namespace: logging
+
+openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_nodeselector | default('') }}"
+openshift_logging_elasticsearch_cpu_limit: 1000m
+openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_memory_limit | default('1Gi') }}"
+openshift_logging_elasticsearch_recover_after_time: "{{ openshift_logging_es_recover_after_time | default('5m') }}"
+
+openshift_logging_elasticsearch_replica_count: 1
+
+# ES deployment type
+openshift_logging_elasticsearch_deployment_type: "data-master"
+
+# ES deployment name
+openshift_logging_elasticsearch_deployment_name: ""
+
+# One of ['emptydir', 'pvc', 'hostmount']
+openshift_logging_elasticsearch_storage_type: "emptydir"
+
+# hostmount options
+openshift_logging_elasticsearch_hostmount_path: ""
+
+# pvc options
+# the name of the PVC we will bind to -- create it if it does not exist
+openshift_logging_elasticsearch_pvc_name: ""
+
+# required if the PVC does not already exist
+openshift_logging_elasticsearch_pvc_size: ""
+openshift_logging_elasticsearch_pvc_dynamic: false
+openshift_logging_elasticsearch_pvc_pv_selector: {}
+openshift_logging_elasticsearch_pvc_access_modes: ['ReadWriteOnce']
+openshift_logging_elasticsearch_storage_group: '65534'
+
+openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_prefix | default('logging-es') }}"
+
+# this determines whether this is an operations (ops) deployment or a non-ops
+# deployment; it is used only for naming purposes
+openshift_logging_elasticsearch_ops_deployment: false
+
+openshift_logging_elasticsearch_ops_allow_cluster_reader: false
+
+# The following can be uncommented to provide values for configmaps -- take care when providing file contents, as incorrect contents may leave your cluster inoperable.
+#es_logging_contents:
+#es_config_contents:
+
+
+openshift_logging_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}"
+openshift_logging_master_public_url: "{{ openshift_hosted_logging_master_public_url | default('https://' + openshift.common.public_hostname + ':' ~ (openshift_master_api_port | default('8443', true))) }}"
+openshift_logging_es_host: logging-es
+openshift_logging_es_port: 9200
+openshift_logging_es_ca: /etc/fluent/keys/ca
+openshift_logging_es_client_cert: /etc/fluent/keys/cert
+openshift_logging_es_client_key: /etc/fluent/keys/key
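
When openshift_logging_elasticsearch_storage_type is 'pvc', the tasks in this role render pvc.j2 and bind the deployment to the named claim, creating it if it does not yet exist. A hypothetical group_vars sketch selecting dynamically provisioned PVC storage (values illustrative):

    openshift_logging_elasticsearch_storage_type: pvc
    openshift_logging_elasticsearch_pvc_name: logging-es-0  # bound to, or created if missing
    openshift_logging_elasticsearch_pvc_size: 10Gi          # required when the claim must be created
    openshift_logging_elasticsearch_pvc_dynamic: true       # adds the alpha storage-class annotation
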
diff --git a/roles/openshift_logging/files/es_migration.sh b/roles/openshift_logging_elasticsearch/files/es_migration.sh
index 339b5a1b2..339b5a1b2 100644
--- a/roles/openshift_logging/files/es_migration.sh
+++ b/roles/openshift_logging_elasticsearch/files/es_migration.sh
diff --git a/roles/openshift_logging_elasticsearch/files/rolebinding-reader.yml b/roles/openshift_logging_elasticsearch/files/rolebinding-reader.yml
new file mode 100644
index 000000000..567c9f289
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/files/rolebinding-reader.yml
@@ -0,0 +1,9 @@
+apiVersion: v1
+kind: ClusterRole
+metadata:
+ name: rolebinding-reader
+rules:
+- resources:
+ - clusterrolebindings
+ verbs:
+ - get
diff --git a/roles/openshift_logging_elasticsearch/meta/main.yaml b/roles/openshift_logging_elasticsearch/meta/main.yaml
new file mode 100644
index 000000000..097270772
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/meta/main.yaml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: OpenShift Red Hat
+ description: OpenShift Aggregated Logging Elasticsearch Component
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: lib_openshift
diff --git a/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml b/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml
new file mode 100644
index 000000000..1a952b5cf
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/tasks/determine_version.yaml
@@ -0,0 +1,17 @@
+---
+# TODO: consider making this a module instead
+- fail:
+ msg: Missing version to install; provide one via 'openshift_logging_image_version'
+ when: not openshift_logging_image_version or openshift_logging_image_version == ''
+
+- set_fact:
+ es_version: "{{ __latest_es_version }}"
+ when: openshift_logging_image_version == 'latest'
+
+# should we just assume that we will have the correct major version?
+- set_fact: es_version="{{ openshift_logging_image_version | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}"
+ when: openshift_logging_image_version != 'latest'
+
+- fail:
+ msg: Invalid version specified for Elasticsearch
+ when: es_version not in __allowed_es_versions
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
new file mode 100644
index 000000000..7e88a7498
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -0,0 +1,278 @@
+---
+- name: Validate Elasticsearch cluster size
+ fail: msg="The openshift_logging_es_cluster_size may only be scaled down manually. Please see official documentation on how to do this."
+ when: openshift_logging_facts.elasticsearch.deploymentconfigs | length > openshift_logging_es_cluster_size|int
+
+- name: Validate Elasticsearch Ops cluster size
+ fail: msg="The openshift_logging_es_ops_cluster_size may only be scaled down manually. Please see official documentation on how to do this."
+ when: openshift_logging_facts.elasticsearch_ops.deploymentconfigs | length > openshift_logging_es_ops_cluster_size|int
+
+- fail:
+ msg: Invalid deployment type; must be one of ['data-master', 'data-client', 'master', 'client']
+ when: not openshift_logging_elasticsearch_deployment_type in __allowed_es_types
+
+- set_fact:
+ elasticsearch_name: "{{ 'logging-elasticsearch' ~ ( (openshift_logging_elasticsearch_ops_deployment | default(false) | bool) | ternary('-ops', '')) }}"
+ es_component: "{{ 'es' ~ ( (openshift_logging_elasticsearch_ops_deployment | default(false) | bool) | ternary('-ops', '') ) }}"
+
+- include: determine_version.yaml
+
+# allow passing in a tempdir
+- name: Create temp directory for doing work in
+ command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+
+- set_fact:
+ tempdir: "{{ mktemp.stdout }}"
+
+# This may not be necessary in this role
+- name: Create templates subdirectory
+ file:
+ state: directory
+ path: "{{ tempdir }}/templates"
+ mode: 0755
+ changed_when: False
+
+# we want to make sure we have all the necessary components here
+
+# service account
+- name: Create ES service account
+ oc_serviceaccount:
+ state: present
+ name: "aggregated-logging-elasticsearch"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ image_pull_secrets: "{{ openshift_logging_image_pull_secret }}"
+ when: openshift_logging_image_pull_secret != ''
+
+- name: Create ES service account
+ oc_serviceaccount:
+ state: present
+ name: "aggregated-logging-elasticsearch"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ when:
+ - openshift_logging_image_pull_secret == ''
+
+# rolebinding reader
+- copy:
+ src: rolebinding-reader.yml
+ dest: "{{ tempdir }}/rolebinding-reader.yml"
+
+- name: Create rolebinding-reader role
+ oc_obj:
+ state: present
+ name: "rolebinding-reader"
+ kind: clusterrole
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ files:
+ - "{{ tempdir }}/rolebinding-reader.yml"
+ delete_after: true
+
+# SA roles
+- name: Set rolebinding-reader permissions for ES
+ oc_adm_policy_user:
+ state: present
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ resource_kind: cluster-role
+ resource_name: rolebinding-reader
+ user: "system:serviceaccount:{{ openshift_logging_elasticsearch_namespace }}:aggregated-logging-elasticsearch"
+
+# View role and binding
+- name: Generate logging-elasticsearch-view-role
+ template:
+ src: rolebinding.j2
+ dest: "{{mktemp.stdout}}/logging-elasticsearch-view-role.yaml"
+ vars:
+ obj_name: logging-elasticsearch-view-role
+ roleRef:
+ name: view
+ subjects:
+ - kind: ServiceAccount
+ name: aggregated-logging-elasticsearch
+ changed_when: no
+
+- name: Set logging-elasticsearch-view-role role
+ oc_obj:
+ state: present
+ name: "logging-elasticsearch-view-role"
+ kind: rolebinding
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ files:
+ - "{{ tempdir }}/logging-elasticsearch-view-role.yaml"
+ delete_after: true
+
+# configmap
+- template:
+ src: elasticsearch-logging.yml.j2
+ dest: "{{ tempdir }}/elasticsearch-logging.yml"
+ when: es_logging_contents is undefined
+ changed_when: no
+
+- template:
+ src: elasticsearch.yml.j2
+ dest: "{{ tempdir }}/elasticsearch.yml"
+ vars:
+ allow_cluster_reader: "{{ openshift_logging_elasticsearch_ops_allow_cluster_reader | lower | default('false') }}"
+ es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(1) }}"
+ es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas | default(0) }}"
+ when: es_config_contents is undefined
+ changed_when: no
+
+- copy:
+ content: "{{ es_logging_contents }}"
+ dest: "{{ tempdir }}/elasticsearch-logging.yml"
+ when: es_logging_contents is defined
+ changed_when: no
+
+- copy:
+ content: "{{ es_config_contents }}"
+ dest: "{{ tempdir }}/elasticsearch.yml"
+ when: es_config_contents is defined
+ changed_when: no
+
+- name: Set ES configmap
+ oc_configmap:
+ state: present
+ name: "{{ elasticsearch_name }}"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ from_file:
+ elasticsearch.yml: "{{ tempdir }}/elasticsearch.yml"
+ logging.yml: "{{ tempdir }}/elasticsearch-logging.yml"
+
+
+# secret
+- name: Set ES secret
+ oc_secret:
+ state: present
+ name: "logging-elasticsearch"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ files:
+ - name: key
+ path: "{{ generated_certs_dir }}/logging-es.jks"
+ - name: truststore
+ path: "{{ generated_certs_dir }}/truststore.jks"
+ - name: searchguard.key
+ path: "{{ generated_certs_dir }}/elasticsearch.jks"
+ - name: searchguard.truststore
+ path: "{{ generated_certs_dir }}/truststore.jks"
+ - name: admin-key
+ path: "{{ generated_certs_dir }}/system.admin.key"
+ - name: admin-cert
+ path: "{{ generated_certs_dir }}/system.admin.crt"
+ - name: admin-ca
+ path: "{{ generated_certs_dir }}/ca.crt"
+ - name: admin.jks
+ path: "{{ generated_certs_dir }}/system.admin.jks"
+
+# services
+- name: Set logging-{{ es_component }}-cluster service
+ oc_service:
+ state: present
+ name: "logging-{{ es_component }}-cluster"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ selector:
+ component: "{{ es_component }}"
+ provider: openshift
+ # pending #4091
+ #labels:
+ #- logging-infra: 'support'
+ ports:
+ - port: 9300
+
+- name: Set logging-{{ es_component }} service
+ oc_service:
+ state: present
+ name: "logging-{{ es_component }}"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ selector:
+ component: "{{ es_component }}"
+ provider: openshift
+ # pending #4091
+ #labels:
+ #- logging-infra: 'support'
+ ports:
+ - port: 9200
+ targetPort: "restapi"
+
+- name: Creating ES storage template
+ template:
+ src: pvc.j2
+ dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
+ vars:
+ obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
+ size: "{{ openshift_logging_elasticsearch_pvc_size }}"
+ access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
+ pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
+ when:
+ - openshift_logging_elasticsearch_storage_type == "pvc"
+ - not openshift_logging_elasticsearch_pvc_dynamic
+
+- name: Creating ES storage template
+ template:
+ src: pvc.j2
+ dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
+ vars:
+ obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
+ size: "{{ openshift_logging_elasticsearch_pvc_size }}"
+ access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
+ pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
+ annotations:
+ volume.alpha.kubernetes.io/storage-class: "dynamic"
+ when:
+ - openshift_logging_elasticsearch_storage_type == "pvc"
+ - openshift_logging_elasticsearch_pvc_dynamic
+
+- name: Set ES storage
+ oc_obj:
+ state: present
+ kind: pvc
+ name: "{{ openshift_logging_elasticsearch_pvc_name }}"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ files:
+ - "{{ tempdir }}/templates/logging-es-pvc.yml"
+ delete_after: true
+ when:
+ - openshift_logging_elasticsearch_storage_type == "pvc"
+
+- set_fact:
+ es_deploy_name: "logging-{{ es_component }}-{{ openshift_logging_elasticsearch_deployment_type }}-{{ 'abcdefghijklmnopqrstuvwxyz0123456789' | random_word(8) }}"
+ when: openshift_logging_elasticsearch_deployment_name == ""
+
+- set_fact:
+ es_deploy_name: "{{ openshift_logging_elasticsearch_deployment_name }}"
+ when: openshift_logging_elasticsearch_deployment_name != ""
+
+# DC
+- name: Set ES dc templates
+ template:
+ src: es.j2
+ dest: "{{ tempdir }}/templates/logging-es-dc.yml"
+ vars:
+ es_cluster_name: "{{ es_component }}"
+ component: "{{ es_component }}"
+ logging_component: elasticsearch
+ deploy_name: "{{ es_deploy_name }}"
+ image: "{{ openshift_logging_image_prefix }}logging-elasticsearch:{{ openshift_logging_image_version }}"
+ es_cpu_limit: "{{ openshift_logging_elasticsearch_cpu_limit }}"
+ es_memory_limit: "{{ openshift_logging_elasticsearch_memory_limit }}"
+ es_node_selector: "{{ openshift_logging_elasticsearch_nodeselector | default({}) }}"
+ deploy_type: "{{ openshift_logging_elasticsearch_deployment_type }}"
+ replicas: 1
+
+- name: Set ES dc
+ oc_obj:
+ state: present
+ name: "{{ es_deploy_name }}"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ kind: dc
+ files:
+ - "{{ tempdir }}/templates/logging-es-dc.yml"
+ delete_after: true
+
+## Placeholder for migration when necessary ##
+
+- name: Delete temp directory
+ file:
+ name: "{{ tempdir }}"
+ state: absent
+ changed_when: False
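
Each invocation of this role produces exactly one DC, named logging-<component>-<deployment_type>-<random suffix> unless openshift_logging_elasticsearch_deployment_name is set, so scaling the cluster means invoking the role once per desired member. A rough sketch of what a caller might do; the real orchestration lives in the parent openshift_logging role, which is not part of this hunk:

    - include_role:
        name: openshift_logging_elasticsearch
      vars:
        openshift_logging_elasticsearch_replica_count: 3  # drives the quorum/recovery vars
      with_sequence: count=3                              # one DC per cluster member
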
diff --git a/roles/openshift_logging/templates/elasticsearch-logging.yml.j2 b/roles/openshift_logging_elasticsearch/templates/elasticsearch-logging.yml.j2
index 499e77fb7..377abe21f 100644
--- a/roles/openshift_logging/templates/elasticsearch-logging.yml.j2
+++ b/roles/openshift_logging_elasticsearch/templates/elasticsearch-logging.yml.j2
@@ -1,25 +1,14 @@
# you can override this by setting a system property, for example -Des.logger.level=DEBUG
es.logger.level: INFO
-rootLogger: ${es.logger.level}, {{root_logger}}
+rootLogger: ${es.logger.level}, console, file
logger:
# log action execution errors for easier debugging
action: WARN
-
- # deprecation logging, turn to DEBUG to see them
- deprecation: WARN, deprecation_log_file
-
# reduce the logging for aws, too much is logged under the default INFO
com.amazonaws: WARN
-
io.fabric8.elasticsearch: ${PLUGIN_LOGLEVEL}
io.fabric8.kubernetes: ${PLUGIN_LOGLEVEL}
- # aws will try to do some sketchy JMX stuff, but its not needed.
- com.amazonaws.jmx.SdkMBeanRegistrySupport: ERROR
- com.amazonaws.metrics.AwsSdkMetrics: ERROR
-
- org.apache.http: INFO
-
# gateway
#gateway: DEBUG
#index.gateway: DEBUG
@@ -39,14 +28,13 @@ logger:
additivity:
index.search.slowlog: false
index.indexing.slowlog: false
- deprecation: false
appender:
console:
type: console
layout:
type: consolePattern
- conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %.10000m%n"
+ conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
file:
type: dailyRollingFile
@@ -56,13 +44,16 @@ appender:
type: pattern
conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
- deprecation_log_file:
- type: dailyRollingFile
- file: ${path.logs}/${cluster.name}_deprecation.log
- datePattern: "'.'yyyy-MM-dd"
- layout:
- type: pattern
- conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
+ # Use the following log4j-extras RollingFileAppender to enable gzip compression of log files.
+ # For more information see https://logging.apache.org/log4j/extras/apidocs/org/apache/log4j/rolling/RollingFileAppender.html
+ #file:
+ #type: extrasRollingFile
+ #file: ${path.logs}/${cluster.name}.log
+ #rollingPolicy: timeBased
+ #rollingPolicy.FileNamePattern: ${path.logs}/${cluster.name}.log.%d{yyyy-MM-dd}.gz
+ #layout:
+ #type: pattern
+ #conversionPattern: "[%d{ISO8601}][%-5p][%-25c] %m%n"
index_search_slow_log_file:
type: dailyRollingFile
diff --git a/roles/openshift_logging/templates/elasticsearch.yml.j2 b/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2
index 355642cb7..58c325c8a 100644
--- a/roles/openshift_logging/templates/elasticsearch.yml.j2
+++ b/roles/openshift_logging_elasticsearch/templates/elasticsearch.yml.j2
@@ -14,8 +14,8 @@ index:
flush_threshold_period: 5m
node:
- master: true
- data: true
+ master: ${IS_MASTER}
+ data: ${HAS_DATA}
network:
host: 0.0.0.0
@@ -38,6 +38,7 @@ gateway:
io.fabric8.elasticsearch.authentication.users: ["system.logging.kibana", "system.logging.fluentd", "system.logging.curator", "system.admin"]
io.fabric8.elasticsearch.kibana.mapping.app: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json
io.fabric8.elasticsearch.kibana.mapping.ops: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json
+io.fabric8.elasticsearch.kibana.mapping.empty: /usr/share/elasticsearch/index_patterns/com.redhat.viaq-openshift.index-pattern.json
openshift.config:
use_common_data_model: true
diff --git a/roles/openshift_logging/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2
index 680c16cf4..bd2289f0d 100644
--- a/roles/openshift_logging/templates/es.j2
+++ b/roles/openshift_logging_elasticsearch/templates/es.j2
@@ -8,7 +8,7 @@ metadata:
deployment: "{{deploy_name}}"
logging-infra: "{{logging_component}}"
spec:
- replicas: {{replicas|default(0)}}
+ replicas: {{replicas|default(1)}}
selector:
provider: openshift
component: "{{component}}"
@@ -29,7 +29,7 @@ spec:
serviceAccountName: aggregated-logging-elasticsearch
securityContext:
supplementalGroups:
- - {{openshift_logging_es_storage_group}}
+ - {{openshift_logging_elasticsearch_storage_group}}
{% if es_node_selector is iterable and es_node_selector | length > 0 %}
nodeSelector:
{% for key, value in es_node_selector.iteritems() %}
@@ -73,7 +73,7 @@ spec:
value: "logging-{{es_cluster_name}}"
-
name: "INSTANCE_RAM"
- value: "{{openshift_logging_es_memory_limit}}"
+ value: "{{openshift_logging_elasticsearch_memory_limit}}"
-
name: "NODE_QUORUM"
value: "{{es_node_quorum | int}}"
@@ -82,7 +82,18 @@ spec:
value: "{{es_recover_expected_nodes}}"
-
name: "RECOVER_AFTER_TIME"
- value: "{{openshift_logging_es_recover_after_time}}"
+ value: "{{openshift_logging_elasticsearch_recover_after_time}}"
+ -
+ name: "READINESS_PROBE_TIMEOUT"
+ value: "30"
+ -
+ name: "IS_MASTER"
+ value: "{% if deploy_type in ['data-master', 'master'] %}true{% else %}false{% endif %}"
+
+ -
+ name: "HAS_DATA"
+ value: "{% if deploy_type in ['data-master', 'data-client'] %}true{% else %}false{% endif %}"
+
volumeMounts:
- name: elasticsearch
mountPath: /etc/elasticsearch/secret
@@ -96,8 +107,8 @@ spec:
exec:
command:
- "/usr/share/elasticsearch/probe/readiness.sh"
- initialDelaySeconds: 5
- timeoutSeconds: 4
+ initialDelaySeconds: 10
+ timeoutSeconds: 30
periodSeconds: 5
volumes:
- name: elasticsearch
@@ -107,4 +118,12 @@ spec:
configMap:
name: logging-elasticsearch
- name: elasticsearch-storage
-{% include 'es-storage-'+ es_storage['kind'] + '.partial' %}
+{% if openshift_logging_elasticsearch_storage_type == 'pvc' %}
+ persistentVolumeClaim:
+ claimName: {{ openshift_logging_elasticsearch_pvc_name }}
+{% elif openshift_logging_elasticsearch_storage_type == 'hostmount' %}
+ hostPath:
+ path: {{ openshift_logging_elasticsearch_hostmount_path }}
+{% else %}
+ emptyDir: {}
+{% endif %}
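
The per-storage-type partial templates are gone; the volume stanza is now rendered inline from openshift_logging_elasticsearch_storage_type. With pvc storage, for example, the tail of the spec would render roughly as follows (claim name hypothetical):

          - name: elasticsearch-storage
            persistentVolumeClaim:
              claimName: logging-es-0

while 'hostmount' yields a hostPath volume and any other value falls back to an ephemeral emptyDir.
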
diff --git a/roles/openshift_logging/templates/pvc.j2 b/roles/openshift_logging_elasticsearch/templates/pvc.j2
index 07d81afff..f19a3a750 100644
--- a/roles/openshift_logging/templates/pvc.j2
+++ b/roles/openshift_logging_elasticsearch/templates/pvc.j2
@@ -1,7 +1,7 @@
apiVersion: v1
kind: PersistentVolumeClaim
metadata:
- name: "{{obj_name}}"
+ name: {{obj_name}}
labels:
logging-infra: support
{% if annotations is defined %}
diff --git a/roles/openshift_logging/templates/rolebinding.j2 b/roles/openshift_logging_elasticsearch/templates/rolebinding.j2
index fcd4e87cc..fcd4e87cc 100644
--- a/roles/openshift_logging/templates/rolebinding.j2
+++ b/roles/openshift_logging_elasticsearch/templates/rolebinding.j2
diff --git a/roles/openshift_logging_elasticsearch/vars/main.yml b/roles/openshift_logging_elasticsearch/vars/main.yml
new file mode 100644
index 000000000..7a1f5048b
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/vars/main.yml
@@ -0,0 +1,12 @@
+---
+__latest_es_version: "3_5"
+__allowed_es_versions: ["3_5", "3_6"]
+__allowed_es_types: ["data-master", "data-client", "master", "client"]
+
+# TODO: integrate these
+openshift_master_config_dir: "{{ openshift.common.config_base }}/master"
+es_node_quorum: "{{ openshift_logging_elasticsearch_replica_count | int/2 + 1 }}"
+es_min_masters_default: "{{ (openshift_logging_elasticsearch_replica_count | int / 2 | round(0,'floor') + 1) | int }}"
+es_min_masters: "{{ (openshift_logging_elasticsearch_replica_count == 1) | ternary(1, es_min_masters_default) }}"
+es_recover_after_nodes: "{{ openshift_logging_elasticsearch_replica_count | int }}"
+es_recover_expected_nodes: "{{ openshift_logging_elasticsearch_replica_count | int }}"
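
Since Jinja filters bind tighter than arithmetic, es_node_quorum evaluates as (replica_count | int) / 2 + 1. A worked example for a three-member cluster, assuming Python 2 integer division at render time:

    # es_node_quorum            = 3 / 2 + 1 = 2   -> NODE_QUORUM env var in es.j2
    # es_min_masters_default    works out to 2 as well (roughly floor(3/2) + 1)
    # es_recover_after_nodes    = 3
    # es_recover_expected_nodes = 3
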
diff --git a/roles/openshift_logging_fluentd/defaults/main.yml b/roles/openshift_logging_fluentd/defaults/main.yml
new file mode 100644
index 000000000..228196d74
--- /dev/null
+++ b/roles/openshift_logging_fluentd/defaults/main.yml
@@ -0,0 +1,59 @@
+---
+### General logging settings
+openshift_logging_fluentd_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}"
+openshift_logging_fluentd_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}"
+openshift_logging_fluentd_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"
+openshift_logging_fluentd_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}"
+openshift_logging_fluentd_namespace: logging
+
+### Common settings
+openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nodeselector_label | default('logging-infra-fluentd=true') | map_from_pairs }}"
+openshift_logging_fluentd_cpu_limit: 100m
+openshift_logging_fluentd_memory_limit: 512Mi
+openshift_logging_fluentd_hosts: ['--all']
+
+# float time in seconds to wait between node labelling
+openshift_logging_fluentd_label_delay: '0.5'
+
+# Fluentd deployment type
+openshift_logging_fluentd_deployment_type: "hosted"
+
+### Used by "hosted" and "secure-host" deployments
+
+# Destination for the application based logs
+openshift_logging_fluentd_app_host: "logging-es"
+openshift_logging_fluentd_app_port: 9200
+# Destination for the operations based logs
+openshift_logging_fluentd_ops_host: "{{ openshift_logging_fluentd_app_host }}"
+openshift_logging_fluentd_ops_port: "{{ openshift_logging_fluentd_app_port }}"
+
+### Used by "hosted" and "secure-aggregator" deployments
+#openshift_logging_fluentd_use_journal: "{{ openshift_hosted_logging_use_journal }}"
+openshift_logging_fluentd_journal_source: "{{ openshift_hosted_logging_journal_source | default('') }}"
+openshift_logging_fluentd_journal_read_from_head: "{{ openshift_hosted_logging_journal_read_from_head | default('') }}"
+
+openshift_logging_fluentd_app_client_cert: /etc/fluent/keys/cert
+openshift_logging_fluentd_app_client_key: /etc/fluent/keys/key
+openshift_logging_fluentd_app_ca: /etc/fluent/keys/ca
+openshift_logging_fluentd_ops_client_cert: /etc/fluent/keys/cert
+openshift_logging_fluentd_ops_client_key: /etc/fluent/keys/key
+openshift_logging_fluentd_ops_ca: /etc/fluent/keys/ca
+
+
+# used by "secure-host" and "secure-aggregator" deployments
+openshift_logging_fluentd_shared_key: "{{ 'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789' | random_word(128) }}"
+openshift_logging_fluentd_aggregating_port: 24284
+openshift_logging_fluentd_aggregating_host: "${HOSTNAME}"
+openshift_logging_fluentd_aggregating_secure: "no"
+openshift_logging_fluentd_aggregating_strict: "no"
+openshift_logging_fluentd_aggregating_cert_path: none
+openshift_logging_fluentd_aggregating_key_path: none
+openshift_logging_fluentd_aggregating_passphrase: none
+
+### Deprecating in 3.6
+openshift_logging_fluentd_es_copy: false
+
+# The following can be uncommented to provide values for configmaps -- take care when providing file contents, as incorrect contents may leave your cluster inoperable.
+#fluentd_config_contents:
+#fluentd_throttle_contents:
+#fluentd_secureforward_contents:
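
For journald-based hosts, the journal input can be enabled explicitly rather than relying on the deprecated openshift_hosted_logging_* passthroughs. A hypothetical group_vars sketch (values illustrative):

    openshift_logging_fluentd_use_journal: true
    openshift_logging_fluentd_journal_source: /run/log/journal
    openshift_logging_fluentd_journal_read_from_head: false  # skip historical entries on first start
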
diff --git a/roles/openshift_logging/files/fluentd-throttle-config.yaml b/roles/openshift_logging_fluentd/files/fluentd-throttle-config.yaml
index 375621ff1..375621ff1 100644
--- a/roles/openshift_logging/files/fluentd-throttle-config.yaml
+++ b/roles/openshift_logging_fluentd/files/fluentd-throttle-config.yaml
diff --git a/roles/openshift_logging/files/secure-forward.conf b/roles/openshift_logging_fluentd/files/secure-forward.conf
index f4483df79..f4483df79 100644
--- a/roles/openshift_logging/files/secure-forward.conf
+++ b/roles/openshift_logging_fluentd/files/secure-forward.conf
diff --git a/roles/openshift_logging_fluentd/meta/main.yaml b/roles/openshift_logging_fluentd/meta/main.yaml
new file mode 100644
index 000000000..2003aacb2
--- /dev/null
+++ b/roles/openshift_logging_fluentd/meta/main.yaml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: OpenShift Red Hat
+ description: OpenShift Aggregated Logging Fluentd Component
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: lib_openshift
diff --git a/roles/openshift_logging_fluentd/tasks/determine_version.yaml b/roles/openshift_logging_fluentd/tasks/determine_version.yaml
new file mode 100644
index 000000000..a1ba71b1b
--- /dev/null
+++ b/roles/openshift_logging_fluentd/tasks/determine_version.yaml
@@ -0,0 +1,17 @@
+---
+# TODO: consider making this a module instead
+- fail:
+ msg: Missing version to install; provide one via 'openshift_logging_image_version'
+ when: not openshift_logging_image_version or openshift_logging_image_version == ''
+
+- set_fact:
+ fluentd_version: "{{ __latest_fluentd_version }}"
+ when: openshift_logging_image_version == 'latest'
+
+# should we just assume that we will have the correct major version?
+- set_fact: fluentd_version="{{ openshift_logging_image_version | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}"
+ when: openshift_logging_image_version != 'latest'
+
+- fail:
+ msg: Invalid version specified for Fluentd
+ when: fluentd_version not in __allowed_fluentd_versions
diff --git a/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml b/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml
new file mode 100644
index 000000000..e92a35f27
--- /dev/null
+++ b/roles/openshift_logging_fluentd/tasks/label_and_wait.yaml
@@ -0,0 +1,10 @@
+---
+- name: Label {{ node }} for Fluentd deployment
+ oc_label:
+ name: "{{ node }}"
+ kind: node
+ state: add
+ labels: "{{ openshift_logging_fluentd_nodeselector | oo_dict_to_list_of_dict }}"
+
+# wait half a second between labels
+- local_action: command sleep {{ openshift_logging_fluentd_label_delay | default('.5') }}
diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml
new file mode 100644
index 000000000..8194223e8
--- /dev/null
+++ b/roles/openshift_logging_fluentd/tasks/main.yaml
@@ -0,0 +1,207 @@
+---
+- fail:
+ msg: Only one Fluentd nodeselector key pair should be provided
+ when: "{{ openshift_logging_fluentd_nodeselector.keys() | count }} > 1"
+
+- fail:
+ msg: Application logs destination is required
+ when: not openshift_logging_fluentd_app_host or openshift_logging_fluentd_app_host == ''
+
+- fail:
+ msg: Operations logs destination is required
+ when: not openshift_logging_fluentd_ops_host or openshift_logging_fluentd_ops_host == ''
+
+- fail:
+ msg: Invalid deployment type, one of ['hosted', 'secure-aggregator', 'secure-host'] allowed
+ when: openshift_logging_fluentd_deployment_type not in __allowed_fluentd_types
+
+- include: determine_version.yaml
+
+- set_fact:
+ openshift_logging_fluentd_use_journal: "{{ openshift_hosted_logging_use_journal }}"
+ when:
+ - openshift_hosted_logging_use_journal is defined
+ - openshift_logging_fluentd_use_journal is not defined
+
+- set_fact:
+ openshift_logging_fluentd_use_journal: "{{ __fluentd_use_journal }}"
+ when:
+ - openshift_hosted_logging_use_journal is not defined
+ - openshift_logging_fluentd_use_journal is not defined
+
+# allow passing in a tempdir
+- name: Create temp directory for doing work in
+ command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+
+- set_fact:
+ tempdir: "{{ mktemp.stdout }}"
+
+- name: Create templates subdirectory
+ file:
+ state: directory
+ path: "{{ tempdir }}/templates"
+ mode: 0755
+ changed_when: False
+
+# we want to make sure we have all the necessary components here
+
+# create service account
+- name: Create Fluentd service account
+ oc_serviceaccount:
+ state: present
+ name: "aggregated-logging-fluentd"
+ namespace: "{{ openshift_logging_fluentd_namespace }}"
+ image_pull_secrets: "{{ openshift_logging_image_pull_secret }}"
+ when: openshift_logging_image_pull_secret != ''
+
+- name: Create Fluentd service account
+ oc_serviceaccount:
+ state: present
+ name: "aggregated-logging-fluentd"
+ namespace: "{{ openshift_logging_fluentd_namespace }}"
+ when:
+ - openshift_logging_image_pull_secret == ''
+
+# set service account scc
+- name: Set privileged permissions for Fluentd
+ oc_adm_policy_user:
+ namespace: "{{ openshift_logging_fluentd_namespace }}"
+ resource_kind: scc
+ resource_name: privileged
+ state: present
+ user: "system:serviceaccount:{{ openshift_logging_fluentd_namespace }}:aggregated-logging-fluentd"
+
+# set service account permissions
+- name: Set cluster-reader permissions for Fluentd
+ oc_adm_policy_user:
+ namespace: "{{ openshift_logging_fluentd_namespace }}"
+ resource_kind: cluster-role
+ resource_name: cluster-reader
+ state: present
+ user: "system:serviceaccount:{{ openshift_logging_fluentd_namespace }}:aggregated-logging-fluentd"
+
+# create Fluentd configmap
+- template:
+ src: fluent.conf.j2
+ dest: "{{ tempdir }}/fluent.conf"
+ vars:
+ deploy_type: "{{ openshift_logging_fluentd_deployment_type }}"
+ when: fluentd_config_contents is undefined
+ changed_when: no
+
+- copy:
+ src: fluentd-throttle-config.yaml
+ dest: "{{ tempdir }}/fluentd-throttle-config.yaml"
+ when: fluentd_throttle_contents is undefined
+ changed_when: no
+
+- copy:
+ src: secure-forward.conf
+ dest: "{{ tempdir }}/secure-forward.conf"
+ when: fluentd_secureforward_contents is undefined
+ changed_when: no
+
+- copy:
+ content: "{{ fluentd_config_contents }}"
+ dest: "{{ tempdir }}/fluent.conf"
+ when: fluentd_config_contents is defined
+ changed_when: no
+
+- copy:
+ content: "{{ fluentd_throttle_contents }}"
+ dest: "{{ tempdir }}/fluentd-throttle-config.yaml"
+ when: fluentd_throttle_contents is defined
+ changed_when: no
+
+- copy:
+ content: "{{ fluentd_secureforward_contents }}"
+ dest: "{{ tempdir }}/secure-forward.conf"
+ when: fluentd_secureforward_contents is defined
+ changed_when: no
+
+- name: Set Fluentd configmap
+ oc_configmap:
+ state: present
+ name: "logging-fluentd"
+ namespace: "{{ openshift_logging_fluentd_namespace }}"
+ from_file:
+ fluent.conf: "{{ tempdir }}/fluent.conf"
+ throttle-config.yaml: "{{ tempdir }}/fluentd-throttle-config.yaml"
+ secure-forward.conf: "{{ tempdir }}/secure-forward.conf"
+
+# create Fluentd secret
+# TODO: add aggregation secrets if necessary
+- name: Set logging-fluentd secret
+ oc_secret:
+ state: present
+ name: logging-fluentd
+ namespace: "{{ openshift_logging_fluentd_namespace }}"
+ files:
+ - name: ca
+ path: "{{ generated_certs_dir }}/ca.crt"
+ - name: key
+ path: "{{ generated_certs_dir }}/system.logging.fluentd.key"
+ - name: cert
+ path: "{{ generated_certs_dir }}/system.logging.fluentd.crt"
+
+# create Fluentd daemonset
+
+# this should change based on the type of fluentd deployment to be done...
+# TODO: pass in aggregation configurations
+- name: Generate logging-fluentd daemonset definition
+ template:
+ src: fluentd.j2
+ dest: "{{ tempdir }}/templates/logging-fluentd.yaml"
+ vars:
+ daemonset_name: logging-fluentd
+ daemonset_component: fluentd
+ daemonset_container_name: fluentd-elasticsearch
+ daemonset_serviceAccount: aggregated-logging-fluentd
+ app_host: "{{ openshift_logging_fluentd_app_host }}"
+ app_port: "{{ openshift_logging_fluentd_app_port }}"
+ ops_host: "{{ openshift_logging_fluentd_ops_host }}"
+ ops_port: "{{ openshift_logging_fluentd_ops_port }}"
+ fluentd_nodeselector_key: "{{ openshift_logging_fluentd_nodeselector.keys()[0] }}"
+ fluentd_nodeselector_value: "{{ openshift_logging_fluentd_nodeselector.values()[0] }}"
+ check_mode: no
+ changed_when: no
+
+- name: Set logging-fluentd daemonset
+ oc_obj:
+ state: present
+ name: logging-fluentd
+ namespace: "{{ openshift_logging_fluentd_namespace }}"
+ kind: daemonset
+ files:
+ - "{{ tempdir }}/templates/logging-fluentd.yaml"
+ delete_after: true
+
+# Scale up Fluentd
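+# openshift_logging_fluentd_hosts may be ['--all']; if so, the full node list
+# is fetched from the API and substituted in before labeling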
+- name: Retrieve list of Fluentd hosts
+ oc_obj:
+ state: list
+ kind: node
+ when: "'--all' in openshift_logging_fluentd_hosts"
+ register: fluentd_hosts
+
+- name: Set openshift_logging_fluentd_hosts
+ set_fact:
+ openshift_logging_fluentd_hosts: "{{ fluentd_hosts.results.results[0]['items'] | map(attribute='metadata.name') | list }}"
+ when: "'--all' in openshift_logging_fluentd_hosts"
+
+- include: label_and_wait.yaml
+ vars:
+ node: "{{ fluentd_host }}"
+ with_items: "{{ openshift_logging_fluentd_hosts }}"
+ loop_control:
+ loop_var: fluentd_host
+
+- name: Delete temp directory
+ file:
+ name: "{{ tempdir }}"
+ state: absent
+ changed_when: False
diff --git a/roles/openshift_logging_fluentd/templates/fluent.conf.j2 b/roles/openshift_logging_fluentd/templates/fluent.conf.j2
new file mode 100644
index 000000000..46de94d60
--- /dev/null
+++ b/roles/openshift_logging_fluentd/templates/fluent.conf.j2
@@ -0,0 +1,78 @@
+# This file is the fluentd configuration entrypoint. Edit with care.
+
+@include configs.d/openshift/system.conf
+
+# In each section below, pre- and post- includes don't include anything initially;
+# they exist to enable future additions to openshift conf as needed.
+
+## sources
+{% if deploy_type in ['hosted', 'secure-aggregator'] %}
+## ordered so that syslog always runs last...
+@include configs.d/openshift/input-pre-*.conf
+@include configs.d/dynamic/input-docker-*.conf
+@include configs.d/dynamic/input-syslog-*.conf
+@include configs.d/openshift/input-post-*.conf
+##
+{% else %}
+<source>
+ @type secure_forward
+ @label @INGRESS
+
+ self_hostname ${HOSTNAME}
+ bind 0.0.0.0
+ port {{openshift_logging_fluentd_aggregating_port}}
+
+ shared_key {{openshift_logging_fluentd_shared_key}}
+
+ secure {{openshift_logging_fluentd_aggregating_secure}}
+ enable_strict_verification {{openshift_logging_fluentd_aggregating_strict}}
+ ca_cert_path {{openshift_logging_fluentd_aggregating_cert_path}}
+ ca_private_key_path {{openshift_logging_fluentd_aggregating_key_path}}
+ ca_private_key_passphrase {{openshift_logging_fluentd_aggregating_passphrase}}
+
+ <client>
+ host {{openshift_logging_fluentd_aggregating_host}}
+ </client>
+</source>
+{% endif %}
+
+<label @INGRESS>
+{% if deploy_type in ['hosted', 'secure-host'] %}
+## filters
+ @include configs.d/openshift/filter-pre-*.conf
+ @include configs.d/openshift/filter-retag-journal.conf
+ @include configs.d/openshift/filter-k8s-meta.conf
+ @include configs.d/openshift/filter-kibana-transform.conf
+ @include configs.d/openshift/filter-k8s-flatten-hash.conf
+ @include configs.d/openshift/filter-k8s-record-transform.conf
+ @include configs.d/openshift/filter-syslog-record-transform.conf
+ @include configs.d/openshift/filter-viaq-data-model.conf
+ @include configs.d/openshift/filter-post-*.conf
+##
+
+## matches
+ @include configs.d/openshift/output-pre-*.conf
+ @include configs.d/openshift/output-operations.conf
+ @include configs.d/openshift/output-applications.conf
+ # no post - applications.conf matches everything left
+##
+{% else %}
+ <match **>
+ @type secure_forward
+
+ self_hostname ${HOSTNAME}
+ shared_key {{openshift_logging_fluentd_shared_key}}
+
+ secure {{openshift_logging_fluentd_aggregating_secure}}
+ enable_strict_verification {{openshift_logging_fluentd_aggregating_strict}}
+ ca_cert_path {{openshift_logging_fluentd_aggregating_cert_path}}
+ ca_private_key_path {{openshift_logging_fluentd_aggregating_key_path}}
+ ca_private_key_passphrase {{openshift_logging_fluentd_aggregating_passphrase}}
+
+ <server>
+ host {{openshift_logging_fluentd_aggregating_host}}
+ port {{openshift_logging_fluentd_aggregating_port}}
+ </server>
+ </match>
+{% endif %}
+</label>
diff --git a/roles/openshift_logging_fluentd/templates/fluentd.j2 b/roles/openshift_logging_fluentd/templates/fluentd.j2
new file mode 100644
index 000000000..e185938e3
--- /dev/null
+++ b/roles/openshift_logging_fluentd/templates/fluentd.j2
@@ -0,0 +1,123 @@
+apiVersion: extensions/v1beta1
+kind: "DaemonSet"
+metadata:
+ name: "{{ daemonset_name }}"
+ labels:
+ provider: openshift
+ component: "{{ daemonset_component }}"
+ logging-infra: "{{ daemonset_component }}"
+spec:
+ selector:
+ matchLabels:
+ provider: openshift
+ component: "{{ daemonset_component }}"
+ updateStrategy:
+ type: RollingUpdate
+ rollingUpdate:
+ minReadySeconds: 600
+ template:
+ metadata:
+ name: "{{ daemonset_container_name }}"
+ labels:
+ logging-infra: "{{ daemonset_component }}"
+ provider: openshift
+ component: "{{ daemonset_component }}"
+ spec:
+ serviceAccountName: "{{ daemonset_serviceAccount }}"
+ nodeSelector:
+ {{ fluentd_nodeselector_key }}: "{{ fluentd_nodeselector_value }}"
+ containers:
+ - name: "{{ daemonset_container_name }}"
+ image: "{{ openshift_logging_image_prefix }}{{ daemonset_name }}:{{ openshift_logging_image_version }}"
+ imagePullPolicy: Always
+ securityContext:
+ privileged: true
+ resources:
+ limits:
+ cpu: {{ openshift_logging_fluentd_cpu_limit }}
+ memory: {{ openshift_logging_fluentd_memory_limit }}
+ volumeMounts:
+ - name: runlogjournal
+ mountPath: /run/log/journal
+ - name: varlog
+ mountPath: /var/log
+ - name: varlibdockercontainers
+ mountPath: /var/lib/docker/containers
+ readOnly: true
+ - name: config
+ mountPath: /etc/fluent/configs.d/user
+ readOnly: true
+ - name: certs
+ mountPath: /etc/fluent/keys
+ readOnly: true
+ - name: dockerhostname
+ mountPath: /etc/docker-hostname
+ readOnly: true
+ - name: localtime
+ mountPath: /etc/localtime
+ readOnly: true
+ - name: dockercfg
+ mountPath: /etc/sysconfig/docker
+ readOnly: true
+ - name: dockerdaemoncfg
+ mountPath: /etc/docker
+ readOnly: true
+ env:
+ - name: "K8S_HOST_URL"
+ value: "{{ openshift_logging_fluentd_master_url }}"
+ - name: "ES_HOST"
+ value: "{{ app_host }}"
+ - name: "ES_PORT"
+ value: "{{ app_port }}"
+ - name: "ES_CLIENT_CERT"
+ value: "{{ openshift_logging_fluentd_app_client_cert }}"
+ - name: "ES_CLIENT_KEY"
+ value: "{{ openshift_logging_fluentd_app_client_key }}"
+ - name: "ES_CA"
+ value: "{{ openshift_logging_fluentd_app_ca }}"
+ - name: "OPS_HOST"
+ value: "{{ ops_host }}"
+ - name: "OPS_PORT"
+ value: "{{ ops_port }}"
+ - name: "OPS_CLIENT_CERT"
+ value: "{{ openshift_logging_fluentd_ops_client_cert }}"
+ - name: "OPS_CLIENT_KEY"
+ value: "{{ openshift_logging_fluentd_ops_client_key }}"
+ - name: "OPS_CA"
+ value: "{{ openshift_logging_fluentd_ops_ca }}"
+ - name: "ES_COPY"
+ value: "false"
+ - name: "USE_JOURNAL"
+ value: "{{ openshift_logging_fluentd_use_journal | lower }}"
+ - name: "JOURNAL_SOURCE"
+ value: "{{ openshift_logging_fluentd_journal_source | default('') }}"
+ - name: "JOURNAL_READ_FROM_HEAD"
+ value: "{{ openshift_logging_fluentd_journal_read_from_head | lower }}"
+ volumes:
+ - name: runlogjournal
+ hostPath:
+ path: /run/log/journal
+ - name: varlog
+ hostPath:
+ path: /var/log
+ - name: varlibdockercontainers
+ hostPath:
+ path: /var/lib/docker/containers
+ - name: config
+ configMap:
+ name: logging-fluentd
+ - name: certs
+ secret:
+ secretName: logging-fluentd
+ - name: dockerhostname
+ hostPath:
+ path: /etc/hostname
+ - name: localtime
+ hostPath:
+ path: /etc/localtime
+ - name: dockercfg
+ hostPath:
+ path: /etc/sysconfig/docker
+ - name: dockerdaemoncfg
+ hostPath:
+ path: /etc/docker
diff --git a/roles/openshift_logging_fluentd/vars/main.yml b/roles/openshift_logging_fluentd/vars/main.yml
new file mode 100644
index 000000000..f601b738e
--- /dev/null
+++ b/roles/openshift_logging_fluentd/vars/main.yml
@@ -0,0 +1,7 @@
+---
+__latest_fluentd_version: "3_5"
+__allowed_fluentd_versions: ["3_5", "3_6"]
+__allowed_fluentd_types: ["hosted", "secure-aggregator", "secure-host"]
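+# fallback order: docker_log_driver, then openshift.docker.log_driver, then a
+# scan of openshift.docker.options for --log-driver=journald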
+__fluentd_use_journal: "{{ (docker_log_driver == 'journald') if docker_log_driver is defined else (openshift.docker.log_driver == 'journald') if openshift.docker.log_driver is defined else (openshift.docker.options | search('--log-driver=journald')) if openshift.docker.options is defined else omit }}"
diff --git a/roles/openshift_logging_kibana/defaults/main.yml b/roles/openshift_logging_kibana/defaults/main.yml
new file mode 100644
index 000000000..23337bcd2
--- /dev/null
+++ b/roles/openshift_logging_kibana/defaults/main.yml
@@ -0,0 +1,47 @@
+---
+### Common settings
+openshift_logging_kibana_master_url: "https://kubernetes.default.svc.cluster.local"
+openshift_logging_kibana_master_public_url: "https://kubernetes.default.svc.cluster.local"
+openshift_logging_kibana_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}"
+openshift_logging_kibana_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}"
+openshift_logging_kibana_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"
+openshift_logging_kibana_namespace: logging
+
+openshift_logging_kibana_nodeselector: ""
+openshift_logging_kibana_cpu_limit: null
+openshift_logging_kibana_memory_limit: 736Mi
+
+openshift_logging_kibana_hostname: "kibana.router.default.svc.cluster.local"
+
+openshift_logging_kibana_es_host: "logging-es"
+openshift_logging_kibana_es_port: 9200
+
+openshift_logging_kibana_replicas: 1
+openshift_logging_kibana_edge_term_policy: Redirect
+
+# determines whether this is an operations (ops) or a non-ops deployment;
+# used only for naming purposes
+openshift_logging_kibana_ops_deployment: false
+
+# Proxy settings
+openshift_logging_kibana_proxy_debug: false
+openshift_logging_kibana_proxy_cpu_limit: null
+openshift_logging_kibana_proxy_memory_limit: 96Mi
+
+# The absolute path on the control node to the cert file to use
+# for the public facing kibana certs
+openshift_logging_kibana_cert: ""
+
+# The absolute path on the control node to the key file to use
+# for the public facing kibana certs
+openshift_logging_kibana_key: ""
+
+# The absolute path on the control node to the CA file to use
+# for the public facing kibana certs
+openshift_logging_kibana_ca: ""
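+
+# for example, to serve the route with your own certificates (hypothetical
+# paths, shown only as a sketch):
+#openshift_logging_kibana_cert: /path/to/kibana.crt
+#openshift_logging_kibana_key: /path/to/kibana.key
+#openshift_logging_kibana_ca: /path/to/ca.crt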
diff --git a/roles/openshift_logging_kibana/meta/main.yaml b/roles/openshift_logging_kibana/meta/main.yaml
new file mode 100644
index 000000000..89e08abc0
--- /dev/null
+++ b/roles/openshift_logging_kibana/meta/main.yaml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: OpenShift Red Hat
+ description: OpenShift Aggregated Logging Kibana Component
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: lib_openshift
diff --git a/roles/openshift_logging_kibana/tasks/determine_version.yaml b/roles/openshift_logging_kibana/tasks/determine_version.yaml
new file mode 100644
index 000000000..53e15af5f
--- /dev/null
+++ b/roles/openshift_logging_kibana/tasks/determine_version.yaml
@@ -0,0 +1,18 @@
+---
+# TODO: consider making this a module instead
+- fail:
+ msg: Missing version to install provided by 'openshift_logging_image_version'
+ when: not openshift_logging_image_version or openshift_logging_image_version == ''
+
+- set_fact:
+ kibana_version: "{{ __latest_kibana_version }}"
+ when: openshift_logging_image_version == 'latest'
+
+# should we just assume that we will have the correct major version?
+- set_fact: kibana_version="{{ openshift_logging_image_version | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}"
+ when: openshift_logging_image_version != 'latest'
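+# e.g. an openshift_logging_image_version of 'v3.6.173' yields kibana_version '3_6'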
+
+- fail:
+ msg: Invalid version specified for Kibana
+ when: kibana_version not in __allowed_kibana_versions
diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml
new file mode 100644
index 000000000..bae55ffaa
--- /dev/null
+++ b/roles/openshift_logging_kibana/tasks/main.yaml
@@ -0,0 +1,261 @@
+---
+# TODO: fail if we don't have an endpoint for ES to connect to?
+
+- include: determine_version.yaml
+
+# allow passing in a tempdir
+- name: Create temp directory for doing work in
+ command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+
+- set_fact:
+ tempdir: "{{ mktemp.stdout }}"
+
+# This may not be necessary in this role
+- name: Create templates subdirectory
+ file:
+ state: directory
+ path: "{{ tempdir }}/templates"
+ mode: 0755
+ changed_when: False
+
+# we want to make sure we have all the necessary components here
+
+# create service account
+- name: Create Kibana service account
+ oc_serviceaccount:
+ state: present
+ name: "aggregated-logging-kibana"
+ namespace: "{{ openshift_logging_namespace }}"
+ image_pull_secrets: "{{ openshift_logging_image_pull_secret }}"
+ when: openshift_logging_image_pull_secret != ''
+
+- name: Create Kibana service account
+ oc_serviceaccount:
+ state: present
+ name: "aggregated-logging-kibana"
+ namespace: "{{ openshift_logging_namespace }}"
+ when:
+ - openshift_logging_image_pull_secret == ''
+
+- set_fact:
+ kibana_name: "{{ 'logging-kibana' ~ ( (openshift_logging_kibana_ops_deployment | default(false) | bool) | ternary('-ops', '')) }}"
+ kibana_component: "{{ 'kibana' ~ ( (openshift_logging_kibana_ops_deployment | default(false) | bool) | ternary('-ops', '')) }}"
+
+# Check {{ generated_certs_dir }} for session_secret and oauth_secret
+- name: Checking for session_secret
+ stat: path="{{generated_certs_dir}}/session_secret"
+ register: session_secret_file
+
+- name: Checking for oauth_secret
+ stat: path="{{generated_certs_dir}}/oauth_secret"
+ register: oauth_secret_file
+
+# gen session_secret if necessary
+- name: Generate session secret
+ copy:
+ content: "{{ 200 | oo_random_word }}"
+ dest: "{{ generated_certs_dir }}/session_secret"
+ when:
+ - not session_secret_file.stat.exists
+
+# gen oauth_secret if necessary
+- name: Generate oauth secret
+ copy:
+ content: "{{ 64 | oo_random_word }}"
+ dest: "{{ generated_certs_dir }}/oauth_secret"
+ when:
+ - not oauth_secret_file.stat.exists
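+# (oo_random_word renders a random string of the piped-in length: 200
+# characters for the session secret, 64 for the oauth secret)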
+
+- name: Retrieving the cert to use when generating secrets for the logging components
+ slurp:
+ src: "{{ generated_certs_dir }}/{{ item.file }}"
+ register: key_pairs
+ with_items:
+ - { name: "ca_file", file: "ca.crt" }
+ - { name: "kibana_internal_key", file: "kibana-internal.key"}
+ - { name: "kibana_internal_cert", file: "kibana-internal.crt"}
+ - { name: "server_tls", file: "server-tls.json"}
+ - { name: "session_secret", file: "session_secret" }
+ - { name: "oauth_secret", file: "oauth_secret" }
+
+# services
+- name: Set {{ kibana_name }} service
+ oc_service:
+ state: present
+ name: "{{ kibana_name }}"
+ namespace: "{{ openshift_logging_kibana_namespace }}"
+ selector:
+ component: "{{ kibana_component }}"
+ provider: openshift
+ # pending #4091
+ #labels:
+ #- logging-infra: 'support'
+ ports:
+ - port: 443
+ targetPort: "oaproxy"
+
+# create routes
+# TODO: set up these certs differently?
+- set_fact:
+ kibana_key: "{{ lookup('file', openshift_logging_kibana_key) | b64encode }}"
+ when: "{{ openshift_logging_kibana_key | trim | length > 0 }}"
+ changed_when: false
+
+- set_fact:
+ kibana_cert: "{{ lookup('file', openshift_logging_kibana_cert) | b64encode }}"
+ when: "{{ openshift_logging_kibana_cert | trim | length > 0 }}"
+ changed_when: false
+
+- set_fact:
+ kibana_ca: "{{ lookup('file', openshift_logging_kibana_ca) | b64encode }}"
+ when: "{{ openshift_logging_kibana_ca | trim | length > 0 }}"
+ changed_when: false
+
+- set_fact:
+ kibana_ca: "{{ key_pairs | entry_from_named_pair('ca_file') }}"
+ when: kibana_ca is not defined
+ changed_when: false
+
+- name: Generating Kibana route template
+ template:
+ src: route_reencrypt.j2
+ dest: "{{ tempdir }}/templates/kibana-route.yaml"
+ vars:
+ obj_name: "{{ kibana_name }}"
+ route_host: "{{ openshift_logging_kibana_hostname }}"
+ service_name: "{{ kibana_name }}"
+ tls_key: "{{ kibana_key | default('') | b64decode }}"
+ tls_cert: "{{ kibana_cert | default('') | b64decode }}"
+ tls_ca_cert: "{{ kibana_ca | b64decode }}"
+ tls_dest_ca_cert: "{{ key_pairs | entry_from_named_pair('ca_file') | b64decode }}"
+ edge_term_policy: "{{ openshift_logging_kibana_edge_term_policy | default('') }}"
+ labels:
+ component: support
+ logging-infra: support
+ provider: openshift
+ changed_when: no
+
+# This currently has an issue if the host name changes
+- name: Setting Kibana route
+ oc_obj:
+ state: present
+ name: "{{ kibana_name }}"
+ namespace: "{{ openshift_logging_namespace }}"
+ kind: route
+ files:
+ - "{{ tempdir }}/templates/kibana-route.yaml"
+
+# preserve list of current hostnames
+- name: Get current oauthclient hostnames
+ oc_obj:
+ state: list
+ name: kibana-proxy
+ namespace: "{{ openshift_logging_namespace }}"
+ kind: oauthclient
+ register: oauth_client_list
+
+- set_fact:
+ proxy_hostnames: "{{ (oauth_client_list.results.results[0].redirectURIs | default([])) + ['https://' ~ openshift_logging_kibana_hostname] }}"
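+# the result is the oauthclient's existing redirectURIs plus this route's
+# hostname; duplicates are dropped with | unique when the template is rendered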
+
+# create oauth client
+- name: Create oauth-client template
+ template:
+ src: oauth-client.j2
+ dest: "{{ tempdir }}/templates/oauth-client.yml"
+ vars:
+ kibana_hostnames: "{{ proxy_hostnames | unique }}"
+ secret: "{{ key_pairs | entry_from_named_pair('oauth_secret') | b64decode }}"
+
+- name: Set kibana-proxy oauth-client
+ oc_obj:
+ state: present
+ name: "kibana-proxy"
+ namespace: "{{ openshift_logging_namespace }}"
+ kind: oauthclient
+ files:
+ - "{{ tempdir }}/templates/oauth-client.yml"
+ delete_after: true
+
+# create Kibana secret
+- name: Set Kibana secret
+ oc_secret:
+ state: present
+ name: "logging-kibana"
+ namespace: "{{ openshift_logging_namespace }}"
+ files:
+ - name: ca
+ path: "{{ generated_certs_dir }}/ca.crt"
+ - name: key
+ path: "{{ generated_certs_dir }}/system.logging.kibana.key"
+ - name: cert
+ path: "{{ generated_certs_dir }}/system.logging.kibana.crt"
+
+# create Kibana-proxy secret
+- name: Set Kibana Proxy secret
+ oc_secret:
+ state: present
+ name: "logging-kibana-proxy"
+ namespace: "{{ openshift_logging_namespace }}"
+ # TODO: when possible to have both files and contents for oc_secret use this
+ #files:
+ #- name: server-key
+ # path: "{{ generated_certs_dir }}/kibana-internal.key"
+ #- name: server-cert
+ # path: "{{ generated_certs_dir }}/kibana-internal.crt"
+ #- name: server-tls.json
+ # path: "{{ generated_certs_dir }}/server-tls.json"
+ contents:
+ - path: oauth-secret
+ data: "{{ key_pairs | entry_from_named_pair('oauth_secret') | b64decode }}"
+ - path: session-secret
+ data: "{{ key_pairs | entry_from_named_pair('session_secret') | b64decode }}"
+ - path: server-key
+ data: "{{ key_pairs | entry_from_named_pair('kibana_internal_key') | b64decode }}"
+ - path: server-cert
+ data: "{{ key_pairs | entry_from_named_pair('kibana_internal_cert') | b64decode }}"
+ - path: server-tls.json
+ data: "{{ key_pairs | entry_from_named_pair('server_tls') | b64decode }}"
+
+# create Kibana DC
+- name: Generate Kibana DC template
+ template:
+ src: kibana.j2
+ dest: "{{ tempdir }}/templates/kibana-dc.yaml"
+ vars:
+ component: "{{ kibana_component }}"
+ logging_component: kibana
+ deploy_name: "{{ kibana_name }}"
+ image: "{{ openshift_logging_image_prefix }}logging-kibana:{{ openshift_logging_image_version }}"
+ proxy_image: "{{ openshift_logging_image_prefix }}logging-auth-proxy:{{ openshift_logging_image_version }}"
+ es_host: "{{ openshift_logging_kibana_es_host }}"
+ es_port: "{{ openshift_logging_kibana_es_port }}"
+ kibana_cpu_limit: "{{ openshift_logging_kibana_cpu_limit }}"
+ kibana_memory_limit: "{{ openshift_logging_kibana_memory_limit }}"
+ kibana_proxy_cpu_limit: "{{ openshift_logging_kibana_proxy_cpu_limit }}"
+ kibana_proxy_memory_limit: "{{ openshift_logging_kibana_proxy_memory_limit }}"
+ replicas: "{{ openshift_logging_kibana_replicas | default (1) }}"
+ kibana_node_selector: "{{ openshift_logging_kibana_nodeselector | default({}) }}"
+
+- name: Set Kibana DC
+ oc_obj:
+ state: present
+ name: "{{ kibana_name }}"
+ namespace: "{{ openshift_logging_namespace }}"
+ kind: dc
+ files:
+ - "{{ tempdir }}/templates/kibana-dc.yaml"
+ delete_after: true
+
+# update master configs?
+
+- name: Delete temp directory
+ file:
+ name: "{{ tempdir }}"
+ state: absent
+ changed_when: False
diff --git a/roles/openshift_logging/templates/kibana.j2 b/roles/openshift_logging_kibana/templates/kibana.j2
index 25fab9ac4..f8043812b 100644
--- a/roles/openshift_logging/templates/kibana.j2
+++ b/roles/openshift_logging_kibana/templates/kibana.j2
@@ -1,17 +1,17 @@
apiVersion: "v1"
kind: "DeploymentConfig"
metadata:
- name: "{{deploy_name}}"
+ name: "{{ deploy_name }}"
labels:
provider: openshift
- component: "{{component}}"
- logging-infra: "{{logging_component}}"
+ component: "{{ component }}"
+ logging-infra: "{{ logging_component }}"
spec:
- replicas: {{replicas|default(0)}}
+ replicas: {{ replicas | default(1) }}
selector:
provider: openshift
- component: "{{component}}"
- logging-infra: "{{logging_component}}"
+ component: "{{ component }}"
+ logging-infra: "{{ logging_component }}"
strategy:
rollingParams:
intervalSeconds: 1
@@ -20,37 +20,39 @@ spec:
type: Rolling
template:
metadata:
- name: "{{deploy_name}}"
+ name: "{{ deploy_name }}"
labels:
- logging-infra: "{{logging_component}}"
+ logging-infra: "{{ logging_component }}"
provider: openshift
- component: "{{component}}"
+ component: "{{ component }}"
spec:
serviceAccountName: aggregated-logging-kibana
{% if kibana_node_selector is iterable and kibana_node_selector | length > 0 %}
nodeSelector:
{% for key, value in kibana_node_selector.iteritems() %}
- {{key}}: "{{value}}"
+ {{ key }}: "{{ value }}"
{% endfor %}
{% endif %}
containers:
-
name: "kibana"
- image: {{image}}
+ image: {{ image }}
imagePullPolicy: Always
-{% if (kibana_memory_limit is defined and kibana_memory_limit is not none) or (kibana_cpu_limit is defined and kibana_cpu_limit is not none) %}
+{% if (kibana_memory_limit is defined and kibana_memory_limit is not none and kibana_memory_limit != "") or (kibana_cpu_limit is defined and kibana_cpu_limit is not none and kibana_cpu_limit != "") %}
resources:
limits:
-{% if kibana_cpu_limit is not none %}
- cpu: "{{kibana_cpu_limit}}"
+{% if kibana_cpu_limit is not none and kibana_cpu_limit != "" %}
+ cpu: "{{ kibana_cpu_limit }}"
+{% endif %}
+{% if kibana_memory_limit is not none and kibana_memory_limit != "" %}
+ memory: "{{ kibana_memory_limit }}"
{% endif %}
- memory: "{{kibana_memory_limit | default('736Mi') }}"
{% endif %}
env:
- name: "ES_HOST"
- value: "{{es_host}}"
+ value: "{{ es_host }}"
- name: "ES_PORT"
- value: "{{es_port}}"
+ value: "{{ es_port }}"
-
name: "KIBANA_MEMORY_LIMIT"
valueFrom:
@@ -61,17 +63,26 @@ spec:
- name: kibana
mountPath: /etc/kibana/keys
readOnly: true
+ readinessProbe:
+ exec:
+ command:
+ - "/usr/share/kibana/probe/readiness.sh"
+ initialDelaySeconds: 5
+ timeoutSeconds: 4
+ periodSeconds: 5
-
name: "kibana-proxy"
- image: {{proxy_image}}
+ image: {{ proxy_image }}
imagePullPolicy: Always
-{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none) or (kibana_proxy_cpu_limit is defined and kibana_proxy_cpu_limit is not none) %}
+{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "") or (kibana_proxy_cpu_limit is defined and kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "") %}
resources:
limits:
-{% if kibana_proxy_cpu_limit is not none %}
- cpu: "{{kibana_proxy_cpu_limit}}"
+{% if kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "" %}
+ cpu: "{{ kibana_proxy_cpu_limit }}"
+{% endif %}
+{% if kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "" %}
+ memory: "{{ kibana_proxy_memory_limit }}"
{% endif %}
- memory: "{{kibana_proxy_memory_limit | default('96Mi') }}"
{% endif %}
ports:
-
@@ -92,19 +103,19 @@ spec:
value: kibana-proxy
-
name: "OAP_MASTER_URL"
- value: {{openshift_logging_master_url}}
+ value: {{ openshift_logging_kibana_master_url }}
-
name: "OAP_PUBLIC_MASTER_URL"
- value: {{openshift_logging_master_public_url}}
+ value: {{ openshift_logging_kibana_master_public_url }}
-
name: "OAP_LOGOUT_REDIRECT"
- value: {{openshift_logging_master_public_url}}/console/logout
+ value: {{ openshift_logging_kibana_master_public_url }}/console/logout
-
name: "OAP_MASTER_CA_FILE"
value: "/var/run/secrets/kubernetes.io/serviceaccount/ca.crt"
-
name: "OAP_DEBUG"
- value: "{{openshift_logging_kibana_proxy_debug}}"
+ value: "{{ openshift_logging_kibana_proxy_debug }}"
-
name: "OAP_OAUTH_SECRET_FILE"
value: "/secret/oauth-secret"
diff --git a/roles/openshift_logging/templates/oauth-client.j2 b/roles/openshift_logging_kibana/templates/oauth-client.j2
index 41d3123cb..c80ff3d30 100644
--- a/roles/openshift_logging/templates/oauth-client.j2
+++ b/roles/openshift_logging_kibana/templates/oauth-client.j2
@@ -4,10 +4,11 @@ metadata:
name: kibana-proxy
labels:
logging-infra: support
-secret: {{secret}}
+secret: {{ secret }}
redirectURIs:
-- https://{{openshift_logging_kibana_hostname}}
-- https://{{openshift_logging_kibana_ops_hostname}}
+{% for host in kibana_hostnames %}
+- {{ host }}
+{% endfor %}
scopeRestrictions:
- literals:
- user:info
diff --git a/roles/openshift_logging/templates/route_reencrypt.j2 b/roles/openshift_logging_kibana/templates/route_reencrypt.j2
index cf8a9e65f..cf8a9e65f 100644
--- a/roles/openshift_logging/templates/route_reencrypt.j2
+++ b/roles/openshift_logging_kibana/templates/route_reencrypt.j2
diff --git a/roles/openshift_logging_kibana/vars/main.yml b/roles/openshift_logging_kibana/vars/main.yml
new file mode 100644
index 000000000..87b281c4b
--- /dev/null
+++ b/roles/openshift_logging_kibana/vars/main.yml
@@ -0,0 +1,3 @@
+---
+__latest_kibana_version: "3_5"
+__allowed_kibana_versions: ["3_5", "3_6"]
diff --git a/roles/openshift_logging_mux/defaults/main.yml b/roles/openshift_logging_mux/defaults/main.yml
new file mode 100644
index 000000000..10fa4372c
--- /dev/null
+++ b/roles/openshift_logging_mux/defaults/main.yml
@@ -0,0 +1,53 @@
+---
+### General logging settings
+openshift_logging_mux_image_prefix: "{{ openshift_hosted_logging_deployer_prefix | default('docker.io/openshift/origin-') }}"
+openshift_logging_mux_image_version: "{{ openshift_hosted_logging_deployer_version | default('latest') }}"
+openshift_logging_mux_image_pull_secret: "{{ openshift_hosted_logging_image_pull_secret | default('') }}"
+openshift_logging_mux_master_url: "https://kubernetes.default.svc.{{ openshift.common.dns_domain }}"
+openshift_logging_mux_master_public_url: "{{ openshift_hosted_logging_master_public_url | default('https://' + openshift.common.public_hostname + ':' ~ (openshift_master_api_port | default('8443', true))) }}"
+openshift_logging_mux_namespace: logging
+
+### Common settings
+openshift_logging_mux_nodeselector: "{{ openshift_hosted_logging_mux_nodeselector_label | default('') | map_from_pairs }}"
+openshift_logging_mux_cpu_limit: 500m
+openshift_logging_mux_memory_limit: 1Gi
+
+openshift_logging_mux_replicas: 1
+
+# Destination for the application based logs
+openshift_logging_mux_app_host: "logging-es"
+openshift_logging_mux_app_port: 9200
+# Destination for the operations based logs
+openshift_logging_mux_ops_host: "{{ openshift_logging_mux_app_host }}"
+openshift_logging_mux_ops_port: "{{ openshift_logging_mux_app_port }}"
+
+### Used by "hosted" and "secure-aggregator" deployments
+openshift_logging_mux_use_journal: "{{ openshift_hosted_logging_use_journal | default('') }}"
+openshift_logging_mux_journal_source: "{{ openshift_hosted_logging_journal_source | default('') }}"
+openshift_logging_mux_journal_read_from_head: "{{ openshift_hosted_logging_journal_read_from_head | default('') }}"
+
+openshift_logging_mux_allow_external: False
+openshift_logging_mux_hostname: "{{ 'mux.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
+openshift_logging_mux_port: 24284
+# the namespace to use for undefined projects should come first, followed by any
+# additional namespaces to create by default - users will typically not need to set this
+openshift_logging_mux_default_namespaces: ["mux-undefined"]
+# extra namespaces to create for mux clients - users will need to set this
+openshift_logging_mux_namespaces: []
+
+openshift_logging_mux_app_client_cert: /etc/fluent/keys/cert
+openshift_logging_mux_app_client_key: /etc/fluent/keys/key
+openshift_logging_mux_app_ca: /etc/fluent/keys/ca
+openshift_logging_mux_ops_client_cert: /etc/fluent/keys/cert
+openshift_logging_mux_ops_client_key: /etc/fluent/keys/key
+openshift_logging_mux_ops_ca: /etc/fluent/keys/ca
+
+# The following can be uncommented to provide values for configmaps. Take care
+# when providing file contents, as bad values may leave your cluster unable to
+# operate correctly. Names match the variables the mux tasks consume.
+#fluentd_mux_config_contents:
+#fluentd_mux_throttle_contents:
+#fluentd_mux_secureforward_contents:
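+#
+# for example (hypothetical path, shown only as a sketch):
+#fluentd_mux_config_contents: "{{ lookup('file', '/path/to/custom-mux.conf') }}"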
diff --git a/roles/openshift_logging/files/fluent.conf b/roles/openshift_logging_mux/files/fluent.conf
index aeaa705ee..aeaa705ee 100644
--- a/roles/openshift_logging/files/fluent.conf
+++ b/roles/openshift_logging_mux/files/fluent.conf
diff --git a/roles/openshift_logging_mux/files/secure-forward.conf b/roles/openshift_logging_mux/files/secure-forward.conf
new file mode 100644
index 000000000..f4483df79
--- /dev/null
+++ b/roles/openshift_logging_mux/files/secure-forward.conf
@@ -0,0 +1,24 @@
+# @type secure_forward
+
+# self_hostname ${HOSTNAME}
+# shared_key <SECRET_STRING>
+
+# secure yes
+# enable_strict_verification yes
+
+# ca_cert_path /etc/fluent/keys/your_ca_cert
+# ca_private_key_path /etc/fluent/keys/your_private_key
+ # for private CA secret key
+# ca_private_key_passphrase passphrase
+
+# <server>
+ # or IP
+# host server.fqdn.example.com
+# port 24284
+# </server>
+# <server>
+ # ip address to connect
+# host 203.0.113.8
+ # specify hostlabel for FQDN verification if ipaddress is used for host
+# hostlabel server.fqdn.example.com
+# </server>
diff --git a/roles/openshift_logging_mux/meta/main.yaml b/roles/openshift_logging_mux/meta/main.yaml
new file mode 100644
index 000000000..f40beb79d
--- /dev/null
+++ b/roles/openshift_logging_mux/meta/main.yaml
@@ -0,0 +1,15 @@
+---
+galaxy_info:
+ author: OpenShift Red Hat
+ description: OpenShift Aggregated Logging Mux Component
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: lib_openshift
diff --git a/roles/openshift_logging_mux/tasks/determine_version.yaml b/roles/openshift_logging_mux/tasks/determine_version.yaml
new file mode 100644
index 000000000..229bcf3d5
--- /dev/null
+++ b/roles/openshift_logging_mux/tasks/determine_version.yaml
@@ -0,0 +1,18 @@
+---
+# TODO: consider making this a module instead
+- fail:
+ msg: Missing version to install provided by 'openshift_logging_image_version'
+ when: not openshift_logging_image_version or openshift_logging_image_version == ''
+
+- set_fact:
+ mux_version: "{{ __latest_mux_version }}"
+ when: openshift_logging_image_version == 'latest'
+
+# should we just assume that we will have the correct major version?
+- set_fact: mux_version="{{ openshift_logging_image_version | regex_replace('^v?(?P<major>\d)\.(?P<minor>\d).*$', '3_\\g<minor>') }}"
+ when: openshift_logging_image_version != 'latest'
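+# e.g. an openshift_logging_image_version of 'v3.6.173' yields mux_version '3_6'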
+
+- fail:
+ msg: Invalid version specified for mux
+ when: mux_version not in __allowed_mux_versions
diff --git a/roles/openshift_logging_mux/tasks/main.yaml b/roles/openshift_logging_mux/tasks/main.yaml
new file mode 100644
index 000000000..54af40070
--- /dev/null
+++ b/roles/openshift_logging_mux/tasks/main.yaml
@@ -0,0 +1,203 @@
+---
+- fail:
+ msg: Application logs destination is required
+ when: not openshift_logging_mux_app_host or openshift_logging_mux_app_host == ''
+
+- fail:
+ msg: Operations logs destination is required
+ when: not openshift_logging_mux_ops_host or openshift_logging_mux_ops_host == ''
+
+- include: determine_version.yaml
+
+# allow passing in a tempdir
+- name: Create temp directory for doing work in
+ command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX
+ register: mktemp
+ changed_when: False
+
+- set_fact:
+ tempdir: "{{ mktemp.stdout }}"
+
+- name: Create templates subdirectory
+ file:
+ state: directory
+ path: "{{ tempdir }}/templates"
+ mode: 0755
+ changed_when: False
+
+# we want to make sure we have all the necessary components here
+
+# create service account
+- name: Create Mux service account
+ oc_serviceaccount:
+ state: present
+ name: "aggregated-logging-mux"
+ namespace: "{{ openshift_logging_mux_namespace }}"
+ image_pull_secrets: "{{ openshift_logging_image_pull_secret }}"
+ when: openshift_logging_image_pull_secret != ''
+
+- name: Create Mux service account
+ oc_serviceaccount:
+ state: present
+ name: "aggregated-logging-mux"
+ namespace: "{{ openshift_logging_mux_namespace }}"
+ when:
+ - openshift_logging_image_pull_secret == ''
+
+# set service account scc
+- name: Set privileged permissions for Mux
+ oc_adm_policy_user:
+ namespace: "{{ openshift_logging_mux_namespace }}"
+ resource_kind: scc
+ resource_name: privileged
+ state: present
+ user: "system:serviceaccount:{{ openshift_logging_mux_namespace }}:aggregated-logging-mux"
+
+# set service account permissions
+- name: Set cluster-reader permissions for Mux
+ oc_adm_policy_user:
+ namespace: "{{ openshift_logging_mux_namespace }}"
+ resource_kind: cluster-role
+ resource_name: cluster-reader
+ state: present
+ user: "system:serviceaccount:{{ openshift_logging_mux_namespace }}:aggregated-logging-mux"
+
+# set hostmount-anyuid permissions
+- name: Set hostmount-anyuid permissions for Mux
+ oc_adm_policy_user:
+ namespace: "{{ openshift_logging_mux_namespace }}"
+ resource_kind: scc
+ resource_name: hostmount-anyuid
+ state: present
+ user: "system:serviceaccount:{{ openshift_logging_mux_namespace }}:aggregated-logging-mux"
+
+# create Mux configmap
+- copy:
+ src: fluent.conf
+ dest: "{{ tempdir }}/fluent-mux.conf"
+ when: fluentd_mux_config_contents is undefined
+ changed_when: no
+
+- copy:
+ src: secure-forward.conf
+ dest: "{{ tempdir }}/secure-forward-mux.conf"
+ when: fluentd_mux_secureforward_contents is undefined
+ changed_when: no
+
+- copy:
+ content: "{{ fluentd_mux_config_contents }}"
+ dest: "{{ tempdir }}/fluent-mux.conf"
+ when: fluentd_mux_config_contents is defined
+ changed_when: no
+
+- copy:
+ content: "{{ fluentd_mux_secureforward_contents }}"
+ dest: "{{ tempdir }}/secure-forward-mux.conf"
+ when: fluentd_mux_secureforward_contents is defined
+ changed_when: no
+
+- name: Set Mux configmap
+ oc_configmap:
+ state: present
+ name: "logging-mux"
+ namespace: "{{ openshift_logging_mux_namespace }}"
+ from_file:
+ fluent.conf: "{{ tempdir }}/fluent-mux.conf"
+ secure-forward.conf: "{{ tempdir }}/secure-forward-mux.conf"
+
+# create Mux secret
+- name: Set logging-mux secret
+ oc_secret:
+ state: present
+ name: logging-mux
+ namespace: "{{ openshift_logging_mux_namespace }}"
+ files:
+ - name: ca
+ path: "{{ generated_certs_dir }}/ca.crt"
+ - name: key
+ path: "{{ generated_certs_dir }}/system.logging.mux.key"
+ - name: cert
+ path: "{{ generated_certs_dir }}/system.logging.mux.crt"
+ - name: shared_key
+ path: "{{ generated_certs_dir }}/mux_shared_key"
+
+# services
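+# note: the external service below publishes ansible_eth0.ipv4.address as an
+# external IP, which assumes eth0 is the node's primary interface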
+- name: Set logging-mux service for external communication
+ oc_service:
+ state: present
+ name: "logging-mux"
+ namespace: "{{ openshift_logging_mux_namespace }}"
+ selector:
+ component: mux
+ provider: openshift
+ labels:
+ logging-infra: 'support'
+ ports:
+ - name: mux-forward
+ port: "{{ openshift_logging_mux_port }}"
+ targetPort: "mux-forward"
+ external_ips:
+ - "{{ ansible_eth0.ipv4.address }}"
+ when: openshift_logging_mux_allow_external | bool
+
+- name: Set logging-mux service for internal communication
+ oc_service:
+ state: present
+ name: "logging-mux"
+ namespace: "{{ openshift_logging_mux_namespace }}"
+ selector:
+ component: mux
+ provider: openshift
+ labels:
+ logging-infra: 'support'
+ ports:
+ - name: mux-forward
+ port: "{{ openshift_logging_mux_port }}"
+ targetPort: "mux-forward"
+ when: not openshift_logging_mux_allow_external | bool
+
+# create Mux DC
+- name: Generating mux deploymentconfig
+ template:
+ src: mux.j2
+ dest: "{{mktemp.stdout}}/templates/logging-mux-dc.yaml"
+ vars:
+ component: mux
+ logging_component: mux
+ deploy_name: "logging-{{ component }}"
+ image: "{{ openshift_logging_image_prefix }}logging-fluentd:{{ openshift_logging_image_version }}"
+ es_host: "{{ openshift_logging_mux_app_host }}"
+ es_port: "{{ openshift_logging_mux_app_port }}"
+ ops_host: "{{ openshift_logging_mux_ops_host }}"
+ ops_port: "{{ openshift_logging_mux_ops_port }}"
+ mux_cpu_limit: "{{ openshift_logging_mux_cpu_limit }}"
+ mux_memory_limit: "{{ openshift_logging_mux_memory_limit }}"
+ replicas: "{{ openshift_logging_mux_replicas | default(1) }}"
+ mux_node_selector: "{{ openshift_logging_mux_nodeselector | default({}) }}"
+ check_mode: no
+ changed_when: no
+
+- name: Set logging-mux DC
+ oc_obj:
+ state: present
+ name: logging-mux
+ namespace: "{{ openshift_logging_mux_namespace }}"
+ kind: dc
+ files:
+ - "{{ tempdir }}/templates/logging-mux-dc.yaml"
+ delete_after: true
+
+- name: Add mux namespaces
+ oc_project:
+ state: present
+ name: "{{ item }}"
+ node_selector: ""
+ with_items: "{{ openshift_logging_mux_namespaces | union(openshift_logging_mux_default_namespaces) }}"
+
+- name: Delete temp directory
+ file:
+ name: "{{ tempdir }}"
+ state: absent
+ changed_when: False
diff --git a/roles/openshift_logging/templates/mux.j2 b/roles/openshift_logging_mux/templates/mux.j2
index 41e6abd52..502cd3347 100644
--- a/roles/openshift_logging/templates/mux.j2
+++ b/roles/openshift_logging_mux/templates/mux.j2
@@ -7,7 +7,7 @@ metadata:
component: "{{component}}"
logging-infra: "{{logging_component}}"
spec:
- replicas: {{replicas|default(0)}}
+ replicas: {{replicas|default(1)}}
selector:
provider: openshift
component: "{{component}}"
@@ -26,7 +26,7 @@ spec:
provider: openshift
component: "{{component}}"
spec:
- serviceAccountName: aggregated-logging-fluentd
+ serviceAccountName: aggregated-logging-mux
{% if mux_node_selector is iterable and mux_node_selector | length > 0 %}
nodeSelector:
{% for key, value in mux_node_selector.iteritems() %}
@@ -68,33 +68,33 @@ spec:
readOnly: true
env:
- name: "K8S_HOST_URL"
- value: "{{openshift_logging_master_url}}"
+ value: "{{openshift_logging_mux_master_url}}"
- name: "ES_HOST"
- value: "{{openshift_logging_es_host}}"
+ value: "{{openshift_logging_mux_app_host}}"
- name: "ES_PORT"
- value: "{{openshift_logging_es_port}}"
+ value: "{{openshift_logging_mux_app_port}}"
- name: "ES_CLIENT_CERT"
- value: "{{openshift_logging_es_client_cert}}"
+ value: "{{openshift_logging_mux_app_client_cert}}"
- name: "ES_CLIENT_KEY"
- value: "{{openshift_logging_es_client_key}}"
+ value: "{{openshift_logging_mux_app_client_key}}"
- name: "ES_CA"
- value: "{{openshift_logging_es_ca}}"
+ value: "{{openshift_logging_mux_app_ca}}"
- name: "OPS_HOST"
- value: "{{ops_host}}"
+ value: "{{openshift_logging_mux_ops_host}}"
- name: "OPS_PORT"
- value: "{{ops_port}}"
+ value: "{{openshift_logging_mux_ops_port}}"
- name: "OPS_CLIENT_CERT"
- value: "{{openshift_logging_es_ops_client_cert}}"
+ value: "{{openshift_logging_mux_ops_client_cert}}"
- name: "OPS_CLIENT_KEY"
- value: "{{openshift_logging_es_ops_client_key}}"
+ value: "{{openshift_logging_mux_ops_client_key}}"
- name: "OPS_CA"
- value: "{{openshift_logging_es_ops_ca}}"
+ value: "{{openshift_logging_mux_ops_ca}}"
- name: "USE_JOURNAL"
value: "false"
- name: "JOURNAL_SOURCE"
- value: "{{openshift_logging_fluentd_journal_source | default('')}}"
+ value: "{{openshift_logging_mux_journal_source | default('')}}"
- name: "JOURNAL_READ_FROM_HEAD"
- value: "{{openshift_logging_fluentd_journal_read_from_head|lower}}"
+ value: "{{openshift_logging_mux_journal_read_from_head|lower}}"
- name: FORWARD_LISTEN_HOST
value: "{{ openshift_logging_mux_hostname }}"
- name: FORWARD_LISTEN_PORT
@@ -102,7 +102,7 @@ spec:
- name: USE_MUX
value: "true"
- name: MUX_ALLOW_EXTERNAL
- value: "{{ openshift_logging_mux_allow_external| default('false') }}"
+ value: "{{ openshift_logging_mux_allow_external | default('false') }}"
volumes:
- name: config
configMap:
diff --git a/roles/openshift_logging_mux/vars/main.yml b/roles/openshift_logging_mux/vars/main.yml
new file mode 100644
index 000000000..4234b74e2
--- /dev/null
+++ b/roles/openshift_logging_mux/vars/main.yml
@@ -0,0 +1,3 @@
+---
+__latest_mux_version: "3_5"
+__allowed_mux_versions: ["3_5", "3_6"]
diff --git a/roles/openshift_master/README.md b/roles/openshift_master/README.md
index c3300a7ef..e5362105c 100644
--- a/roles/openshift_master/README.md
+++ b/roles/openshift_master/README.md
@@ -15,17 +15,18 @@ Role Variables
From this role:
-| Name | Default value | |
-|-------------------------------------|-----------------------|--------------------------------------------------|
-| openshift_master_debug_level | openshift_debug_level | Verbosity of the debug logs for master |
+| Name | Default value | |
+|-------------------------------------|-----------------------|-------------------------------------------------------------------------------|
+| openshift_master_debug_level | openshift_debug_level | Verbosity of the debug logs for master |
| openshift_node_ips | [] | List of the openshift node ip addresses to pre-register when master starts up |
-| oreg_url | UNDEF | Default docker registry to use |
-| openshift_master_api_port | UNDEF | |
-| openshift_master_console_port | UNDEF | |
-| openshift_master_api_url | UNDEF | |
-| openshift_master_console_url | UNDEF | |
-| openshift_master_public_api_url | UNDEF | |
-| openshift_master_public_console_url | UNDEF | |
+| oreg_url | UNDEF | Default docker registry to use |
+| oreg_url_master | UNDEF | Default docker registry to use, specifically on the master |
+| openshift_master_api_port | UNDEF | |
+| openshift_master_console_port | UNDEF | |
+| openshift_master_api_url | UNDEF | |
+| openshift_master_console_url | UNDEF | |
+| openshift_master_public_api_url | UNDEF | |
+| openshift_master_public_console_url | UNDEF | |
From openshift_common:
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 5522fef26..aed5598c0 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -177,6 +177,7 @@
# https://github.com/openshift/origin/issues/6447
- name: Start and enable master
systemd:
+ daemon_reload: yes
name: "{{ openshift.common.service_type }}-master"
enabled: yes
state: started
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index ef0256af9..1935d9592 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -274,5 +274,12 @@ servingInfo:
- {{ cipher_suite }}
{% endfor %}
{% endif %}
+{% if openshift_template_service_broker_namespaces is defined %}
+templateServiceBrokerConfig:
+ templateNamespaces:
+{% for namespace in openshift_template_service_broker_namespaces %}
+ - {{ namespace }}
+{% endfor %}
+{% endif %}
volumeConfig:
dynamicProvisioningEnabled: {{ openshift.master.dynamic_provisioning_enabled }}
diff --git a/roles/openshift_master_certificates/tasks/main.yml b/roles/openshift_master_certificates/tasks/main.yml
index 9706da24b..62413536b 100644
--- a/roles/openshift_master_certificates/tasks/main.yml
+++ b/roles/openshift_master_certificates/tasks/main.yml
@@ -71,7 +71,7 @@
delegate_to: "{{ openshift_ca_host }}"
run_once: true
-- name: Generate the master client config
+- name: Generate the loopback master client config
command: >
{{ hostvars[openshift_ca_host].openshift.common.client_binary }} adm create-api-client-config
{% for named_ca_certificate in openshift.master.named_certificates | default([]) | oo_collect('cafile') %}
@@ -80,8 +80,8 @@
--certificate-authority={{ openshift_ca_cert }}
--client-dir={{ openshift_generated_configs_dir }}/master-{{ hostvars[item].openshift.common.hostname }}
--groups=system:masters,system:openshift-master
- --master={{ openshift.master.api_url }}
- --public-master={{ openshift.master.public_api_url }}
+ --master={{ hostvars[item].openshift.master.loopback_api_url }}
+ --public-master={{ hostvars[item].openshift.master.loopback_api_url }}
--signer-cert={{ openshift_ca_cert }}
--signer-key={{ openshift_ca_key }}
--signer-serial={{ openshift_ca_serial }}
diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml
index 79f054b42..ef8dcd5fd 100644
--- a/roles/openshift_master_facts/tasks/main.yml
+++ b/roles/openshift_master_facts/tasks/main.yml
@@ -74,7 +74,7 @@
ldap_ca: "{{ openshift_master_ldap_ca | default(lookup('file', openshift_master_ldap_ca_file) if openshift_master_ldap_ca_file is defined else None) }}"
openid_ca: "{{ openshift_master_openid_ca | default(lookup('file', openshift_master_openid_ca_file) if openshift_master_openid_ca_file is defined else None) }}"
request_header_ca: "{{ openshift_master_request_header_ca | default(lookup('file', openshift_master_request_header_ca_file) if openshift_master_request_header_ca_file is defined else None) }}"
- registry_url: "{{ oreg_url | default(None) }}"
+ registry_url: "{{ oreg_url_master | default(oreg_url) | default(None) }}"
oauth_grant_method: "{{ openshift_master_oauth_grant_method | default(None) }}"
sdn_cluster_network_cidr: "{{ osm_cluster_network_cidr | default(None) }}"
sdn_host_subnet_length: "{{ osm_host_subnet_length | default(None) }}"
diff --git a/roles/openshift_metrics/tasks/generate_certificates.yaml b/roles/openshift_metrics/tasks/generate_certificates.yaml
index 7af3f9467..3dc15d58b 100644
--- a/roles/openshift_metrics/tasks/generate_certificates.yaml
+++ b/roles/openshift_metrics/tasks/generate_certificates.yaml
@@ -1,7 +1,7 @@
---
- name: generate ca certificate chain
command: >
- {{ openshift.common.admin_binary }} ca create-signer-cert
+ {{ openshift.common.client_binary }} adm ca create-signer-cert
--config={{ mktemp.stdout }}/admin.kubeconfig
--key='{{ mktemp.stdout }}/ca.key'
--cert='{{ mktemp.stdout }}/ca.crt'
diff --git a/roles/openshift_metrics/tasks/main.yaml b/roles/openshift_metrics/tasks/main.yaml
index 9af10a849..5d8506a73 100644
--- a/roles/openshift_metrics/tasks/main.yaml
+++ b/roles/openshift_metrics/tasks/main.yaml
@@ -1,5 +1,5 @@
---
-- local_action: shell rpm -q python-passlib || echo not installed
+- local_action: shell python -c 'import passlib' 2>/dev/null || echo not installed
register: passlib_result
- name: Check that python-passlib is available on the control host
diff --git a/roles/openshift_metrics/tasks/setup_certificate.yaml b/roles/openshift_metrics/tasks/setup_certificate.yaml
index 199968579..2d880f4d6 100644
--- a/roles/openshift_metrics/tasks/setup_certificate.yaml
+++ b/roles/openshift_metrics/tasks/setup_certificate.yaml
@@ -1,7 +1,7 @@
---
- name: generate {{ component }} keys
command: >
- {{ openshift.common.admin_binary }} ca create-server-cert
+ {{ openshift.common.client_binary }} adm ca create-server-cert
--config={{ mktemp.stdout }}/admin.kubeconfig
--key='{{ mktemp.stdout }}/{{ component }}.key'
--cert='{{ mktemp.stdout }}/{{ component }}.crt'
diff --git a/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2 b/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2
index 889317847..fc82f49b1 100644
--- a/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2
+++ b/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2
@@ -30,6 +30,7 @@ spec:
{% endif %}
containers:
- image: "{{ openshift_metrics_image_prefix }}metrics-cassandra:{{ openshift_metrics_image_version }}"
+ imagePullPolicy: Always
name: hawkular-cassandra-{{ node }}
ports:
- name: cql-port
diff --git a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
index 401db4e58..9a9363075 100644
--- a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
+++ b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
@@ -25,6 +25,7 @@ spec:
{% endif %}
containers:
- image: {{openshift_metrics_image_prefix}}metrics-hawkular-metrics:{{openshift_metrics_image_version}}
+ imagePullPolicy: Always
name: hawkular-metrics
ports:
- name: http-endpoint
diff --git a/roles/openshift_metrics/templates/heapster.j2 b/roles/openshift_metrics/templates/heapster.j2
index ab998c2fb..d8c7763ea 100644
--- a/roles/openshift_metrics/templates/heapster.j2
+++ b/roles/openshift_metrics/templates/heapster.j2
@@ -27,6 +27,7 @@ spec:
containers:
- name: heapster
image: {{openshift_metrics_image_prefix}}metrics-heapster:{{openshift_metrics_image_version}}
+ imagePullPolicy: Always
ports:
- containerPort: 8082
name: "http-endpoint"
diff --git a/roles/openshift_node/README.md b/roles/openshift_node/README.md
index b69b60c1d..fb0b494da 100644
--- a/roles/openshift_node/README.md
+++ b/roles/openshift_node/README.md
@@ -15,10 +15,11 @@ Role Variables
--------------
From this role:
-| Name | Default value | |
-|------------------------------------------|-----------------------|--------------------------------------------------------|
-| openshift_node_debug_level | openshift_debug_level | Verbosity of the debug logs for node |
-| oreg_url | UNDEF (Optional) | Default docker registry to use |
+| Name | Default value | |
+|----------------------------|-----------------------|----------------------------------------------------------|
+| openshift_node_debug_level | openshift_debug_level | Verbosity of the debug logs for node |
+| oreg_url | UNDEF (Optional) | Default docker registry to use |
+| oreg_url_node | UNDEF (Optional) | Default docker registry to use, specifically on the node |
From openshift_common:
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index cb51416d4..4dcf1eef8 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -12,3 +12,6 @@
- name: restart node
systemd: name={{ openshift.common.service_type }}-node state=restarted
when: (not skip_node_svc_handlers | default(False) | bool) and not (node_service_status_changed | default(false) | bool)
+
+- name: reload sysctl.conf
+ command: /sbin/sysctl -p
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 656874f56..573051504 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -22,7 +22,7 @@
iptables_sync_period: "{{ openshift_node_iptables_sync_period | default(None) }}"
kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}"
labels: "{{ lookup('oo_option', 'openshift_node_labels') | default( openshift_node_labels | default(none), true) }}"
- registry_url: "{{ oreg_url | default(none) }}"
+ registry_url: "{{ oreg_url_node | default(oreg_url) | default(None) }}"
schedulable: "{{ openshift_schedulable | default(openshift_scheduleable) | default(None) }}"
sdn_mtu: "{{ openshift_node_sdn_mtu | default(None) }}"
storage_plugin_deps: "{{ osn_storage_plugin_deps | default(None) }}"
@@ -104,8 +104,14 @@
# The atomic-openshift-node service will set this parameter on
# startup, but if the network service is restarted this setting is
# lost. Reference: https://bugzilla.redhat.com/show_bug.cgi?id=1372388
+#
+# Use lineinfile w/ a handler for this task until
+# https://github.com/ansible/ansible/pull/24277 is included in an
+# ansible release and we can use the sysctl module.
- name: Persist net.ipv4.ip_forward sysctl entry
- sysctl: name="net.ipv4.ip_forward" value=1 sysctl_set=yes state=present reload=yes
+ lineinfile: dest=/etc/sysctl.conf regexp='^net.ipv4.ip_forward' line='net.ipv4.ip_forward=1'
+ notify:
+ - reload sysctl.conf
- name: Start and enable openvswitch service
systemd:
@@ -200,6 +206,7 @@
- name: Start and enable node dep
systemd:
+ daemon_reload: yes
name: "{{ openshift.common.service_type }}-node-dep"
enabled: yes
state: started
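
Taken together with the handler added above, sysctl persistence now follows the classic lineinfile-plus-handler pattern; the handler fires only when lineinfile reports a change, so repeated runs stay idempotent. A self-contained sketch of the same pattern:

    - hosts: nodes
      tasks:
        - name: Persist net.ipv4.ip_forward sysctl entry
          lineinfile:
            dest: /etc/sysctl.conf
            regexp: '^net.ipv4.ip_forward'
            line: 'net.ipv4.ip_forward=1'
          notify:
            - reload sysctl.conf
      handlers:
        - name: reload sysctl.conf
          command: /sbin/sysctl -p   # apply the persisted setting immediately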
diff --git a/roles/openshift_node_upgrade/tasks/main.yml b/roles/openshift_node_upgrade/tasks/main.yml
index 7231bdb9d..d44839d69 100644
--- a/roles/openshift_node_upgrade/tasks/main.yml
+++ b/roles/openshift_node_upgrade/tasks/main.yml
@@ -92,8 +92,8 @@
yedit:
src: "{{ openshift.common.config_base }}/node/node-config.yaml"
key: 'imageConfig.format'
- value: "{{ oreg_url }}"
- when: oreg_url is defined
+ value: "{{ oreg_url | default(oreg_url_node) }}"
+ when: oreg_url is defined or oreg_url_node is defined
# https://docs.openshift.com/container-platform/3.4/admin_guide/overcommit.html#disabling-swap-memory
- name: Check for swap usage
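
After the yedit task runs, imageConfig.format in node-config.yaml carries whichever variable is defined, with oreg_url winning when both are. An illustrative fragment of the resulting node-config.yaml, assuming only oreg_url_node is set to node-registry.example.com/openshift3/ose-${component}:${version}:

    imageConfig:
      format: node-registry.example.com/openshift3/ose-${component}:${version}
      latest: false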
diff --git a/roles/openshift_node_upgrade/tasks/restart.yml b/roles/openshift_node_upgrade/tasks/restart.yml
index e576228ba..508eb9358 100644
--- a/roles/openshift_node_upgrade/tasks/restart.yml
+++ b/roles/openshift_node_upgrade/tasks/restart.yml
@@ -5,6 +5,14 @@
# - openshift.common.hostname
# - openshift.master.api_port
+# NOTE: This is needed to make sure we are using the correct set
+# of systemd unit files. The RPMs lay down defaults but
+# the install/upgrade may override them in /etc/systemd/system/.
+# NOTE: We don't use the systemd module as some versions of the module
+# require a service to be part of the call.
+- name: Reload systemd to ensure latest unit files
+ command: systemctl daemon-reload
+
- name: Restart docker
service:
name: "{{ openshift.docker.service_name }}"
diff --git a/roles/openshift_node_upgrade/tasks/rpm_upgrade.yml b/roles/openshift_node_upgrade/tasks/rpm_upgrade.yml
index 480e87d58..06a2d16ba 100644
--- a/roles/openshift_node_upgrade/tasks/rpm_upgrade.yml
+++ b/roles/openshift_node_upgrade/tasks/rpm_upgrade.yml
@@ -12,3 +12,18 @@
- name: Ensure python-yaml present for config upgrade
package: name=PyYAML state=present
when: not openshift.common.is_atomic | bool
+
+- name: Install Node service file
+ template:
+ dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
+ src: "{{ openshift.common.service_type }}-node.service.j2"
+ register: l_node_unit
+
+# NOTE: This is needed to make sure we are using the correct set
+# of systemd unit files. The RPMs lay down defaults but
+# the install/upgrade may override them in /etc/systemd/system/.
+# NOTE: We don't use the systemd module as some versions of the module
+# require a service to be part of the call.
+- name: Reload systemd units
+ command: systemctl daemon-reload
+ when: l_node_unit | changed
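
Registering the template result and gating the reload on it keeps daemon-reload from firing on every run. The same register/when pattern in isolation, with a hypothetical unit for illustration:

    - name: Install unit file
      template:
        src: example.service.j2
        dest: /etc/systemd/system/example.service
      register: l_unit

    - name: Reload systemd units
      command: systemctl daemon-reload
      when: l_unit | changed   # 'changed' filter style used throughout this codebase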
diff --git a/roles/openshift_node_upgrade/templates/atomic-openshift-node.service.j2 b/roles/openshift_node_upgrade/templates/atomic-openshift-node.service.j2
new file mode 120000
index 000000000..6041fb13a
--- /dev/null
+++ b/roles/openshift_node_upgrade/templates/atomic-openshift-node.service.j2
@@ -0,0 +1 @@
+../../openshift_node/templates/atomic-openshift-node.service.j2 \ No newline at end of file
diff --git a/roles/openshift_node_upgrade/templates/origin-node.service.j2 b/roles/openshift_node_upgrade/templates/origin-node.service.j2
new file mode 120000
index 000000000..79c45a303
--- /dev/null
+++ b/roles/openshift_node_upgrade/templates/origin-node.service.j2
@@ -0,0 +1 @@
+../../openshift_node/templates/origin-node.service.j2 \ No newline at end of file
diff --git a/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py b/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py
index 88801e487..a86c96df7 100644
--- a/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py
+++ b/roles/openshift_storage_glusterfs/filter_plugins/openshift_storage_glusterfs.py
@@ -8,7 +8,7 @@ def map_from_pairs(source, delim="="):
if source == '':
return dict()
- return dict(source.split(delim) for item in source.split(","))
+ return dict(item.split(delim) for item in source.split(","))
# pylint: disable=too-few-public-methods
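
The one-character fix matters: the old expression split the whole source string on every iteration instead of splitting each item, so it only worked by accident for a single pair and raised ValueError as soon as the input held two. With the fix, the filter behaves as this sketch assumes (usable from any play that loads this filter plugin):

    - set_fact:
        volume_options: "{{ 'cluster=storage,kind=glusterfs' | map_from_pairs }}"
    # yields: {'cluster': 'storage', 'kind': 'glusterfs'}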
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index f2f4d16f0..16792388f 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -30,7 +30,8 @@
- set_fact:
openshift_release: "{{ openshift_release | string }}"
- when: openshift_release is defined
+ when:
+ - openshift_release is defined
# Verify that the image tag is in a valid format
- when:
@@ -106,7 +107,11 @@
fail:
msg: "OCP rpm version {{ openshift_rpm_version }} is different from OCP image version {{ openshift_version }}"
# Both versions have the same string representation
- when: openshift_rpm_version != openshift_version
+ when:
+ - openshift_rpm_version != openshift_version
+ # if openshift_pkg_version or openshift_image_tag is defined, the user explicitly allows the rpm and docker image versions to differ
+ - openshift_pkg_version is not defined
+ - openshift_image_tag is not defined
when:
- is_containerized | bool
- not is_atomic | bool
@@ -117,7 +122,7 @@
debug:
msg: >
openshift_image_tag is used for containerized installs. If you are trying to
- specify an image for a non-container install see oreg_url.
+ specify an image for a non-container install, see oreg_url, oreg_url_master, or oreg_url_node.
when:
- not is_containerized | bool
- openshift_image_tag is defined
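
Pinning either variable now signals that an rpm/image version skew is intentional, so the mismatch failure is skipped. An inventory sketch with illustrative version pins:

    [OSEv3:vars]
    # defining either of these suppresses the rpm-vs-image mismatch failure
    openshift_pkg_version=-3.6.173.0.5
    openshift_image_tag=v3.6.173.0.5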
diff --git a/test/integration/openshift_health_checker/builds/aos-package-checks/Dockerfile b/test/integration/openshift_health_checker/builds/aos-package-checks/Dockerfile
index 8542029f6..0d8162c2e 100644
--- a/test/integration/openshift_health_checker/builds/aos-package-checks/Dockerfile
+++ b/test/integration/openshift_health_checker/builds/aos-package-checks/Dockerfile
@@ -25,6 +25,8 @@ RUN cd /root/rpmbuild/SOURCES && \
rpmbuild -bb /root/ose-3.3.spec && \
mkdir /mnt/localrepo/ose-3.{2,3} && \
cp /root/rpmbuild/RPMS/noarch/atomic-openshift*-3.2-1.noarch.rpm /mnt/localrepo/ose-3.2 && \
+ cp /root/rpmbuild/RPMS/noarch/{openvswitch-2.4,docker-1.10}-1.noarch.rpm /mnt/localrepo/ose-3.2 && \
createrepo /mnt/localrepo/ose-3.2 && \
cp /root/rpmbuild/RPMS/noarch/atomic-openshift*-3.3-1.noarch.rpm /mnt/localrepo/ose-3.3 && \
+ cp /root/rpmbuild/RPMS/noarch/{openvswitch-2.4,docker-1.10}-1.noarch.rpm /mnt/localrepo/ose-3.3 && \
createrepo /mnt/localrepo/ose-3.3
diff --git a/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.2.spec b/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.2.spec
index dbc9f0c8e..3b3eab696 100644
--- a/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.2.spec
+++ b/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.2.spec
@@ -12,6 +12,12 @@ BuildArch: noarch
Summary: package the critical aos packages
%package node
Summary: package the critical aos packages
+%package -n openvswitch
+Summary: package the critical aos packages
+Version: 2.4
+%package -n docker
+Summary: package the critical aos packages
+Version: 1.10
%description
Package for pretending to provide AOS
@@ -22,6 +28,12 @@ Package for pretending to provide AOS
%description node
Package for pretending to provide AOS
+%description -n openvswitch
+Package for pretending to provide openvswitch
+
+%description -n docker
+Package for pretending to provide docker
+
%prep
%setup -q
@@ -37,8 +49,9 @@ mkdir -p $RPM_BUILD_ROOT
%files
%files master
%files node
-%doc
-
+%files -n openvswitch
+%files -n docker
+%doc
%changelog
diff --git a/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.3.spec b/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.3.spec
index 9546e8430..66be0a862 100644
--- a/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.3.spec
+++ b/test/integration/openshift_health_checker/builds/aos-package-checks/root/root/ose-3.3.spec
@@ -12,6 +12,12 @@ BuildArch: noarch
Summary: package the critical aos packages
%package node
Summary: package the critical aos packages
+%package -n openvswitch
+Summary: package the critical aos packages
+Version: 2.4
+%package -n docker
+Summary: package the critical aos packages
+Version: 1.10
%description
Package for pretending to provide AOS
@@ -22,6 +28,12 @@ Package for pretending to provide AOS
%description node
Package for pretending to provide AOS
+%description -n openvswitch
+Package for pretending to provide openvswitch
+
+%description -n docker
+Package for pretending to provide docker
+
%prep
%setup -q
@@ -37,8 +49,9 @@ mkdir -p $RPM_BUILD_ROOT
%files
%files master
%files node
-%doc
-
+%files -n openvswitch
+%files -n docker
+%doc
%changelog
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml
index 31d0d521e..a557282b4 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_availability_missing_required.yml
@@ -1,4 +1,5 @@
---
+# NOTE: this test is probably superfluous since openshift_version already does it
- include: ../../setup_container.yml
vars:
image: preflight-aos-package-checks
@@ -7,11 +8,22 @@
- name: Fail as required packages cannot be installed
hosts: all
+ pre_tasks:
+
+ # run before roles to prevent openshift_version breaking
+ - include: tasks/enable_repo.yml
+ vars: { repo_name: "ose-3.2" }
+
roles:
- openshift_health_checker
- tasks:
+
+ post_tasks:
- block:
+ # put the repo back to disabled
+ - include: tasks/enable_repo.yml
+ vars: { repo_name: "ose-3.2", repo_enabled: 0 }
+
- action: openshift_health_check
args:
checks: [ 'package_availability' ]
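
The move into pre_tasks/post_tasks leans on Ansible's play ordering: pre_tasks run before roles, so the test repo is live before openshift_health_checker (and with it openshift_version) executes, while post_tasks restore the state and then run the check. The skeleton the following playbooks all share:

    - name: <check under test>
      hosts: all
      pre_tasks:
        # runs before roles: make packages resolvable for openshift_version
        - include: tasks/enable_repo.yml
          vars: { repo_name: "ose-3.2" }
      roles:
        - openshift_health_checker
      post_tasks:
        # runs after roles: restore repo state, then exercise the check
        - include: tasks/enable_repo.yml
          vars: { repo_name: "ose-3.2", repo_enabled: 0 }
        - action: openshift_health_check
          args:
            checks: [ 'package_availability' ]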
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_update_dep_missing.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_update_dep_missing.yml
index 7b6e71f91..0929b73ce 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_update_dep_missing.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_update_dep_missing.yml
@@ -8,9 +8,16 @@
- name: Fails when a dependency required for update is missing
hosts: all
+ pre_tasks:
+
+ # run before roles to prevent openshift_version breaking
+ - include: tasks/enable_repo.yml
+ vars: { repo_name: "ose-3.2" }
+
roles:
- openshift_health_checker
- tasks:
+
+ post_tasks:
- block:
- include: tasks/enable_repo.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_broken.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_broken.yml
index c2e9c3866..f8790358a 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_broken.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_broken.yml
@@ -8,9 +8,16 @@
- name: Fails when a repo definition is completely broken
hosts: all
+ pre_tasks:
+
+ # run before roles to prevent openshift_version breaking
+ - include: tasks/enable_repo.yml
+ vars: { repo_name: "ose-3.2" }
+
roles:
- openshift_health_checker
- tasks:
+
+ post_tasks:
- block:
- include: tasks/enable_repo.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_disabled.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_disabled.yml
index 98d41aad4..e2bb84715 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_disabled.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_disabled.yml
@@ -8,9 +8,16 @@
- name: Succeeds when nothing blocks a yum update
hosts: all
+ pre_tasks:
+
+ # run before roles to prevent openshift_version breaking
+ - include: tasks/enable_repo.yml
+ vars: { repo_name: "ose-3.2" }
+
roles:
- openshift_health_checker
- tasks:
+
+ post_tasks:
- block:
- action: openshift_health_check
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_unreachable.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_unreachable.yml
index 60ab9942a..28efdd81d 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_unreachable.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_update_repo_unreachable.yml
@@ -8,9 +8,16 @@
- name: Fails when repo content is not available
hosts: all
+ pre_tasks:
+
+ # run before roles to prevent openshift_version breaking
+ - include: tasks/enable_repo.yml
+ vars: { repo_name: "ose-3.2" }
+
roles:
- openshift_health_checker
- tasks:
+
+ post_tasks:
- block:
- include: tasks/enable_repo.yml
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml
index cd60dee5a..58bed0fc0 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_version_matches.yml
@@ -8,13 +8,21 @@
- name: Success when AOS version matches openshift_release
hosts: all
+ pre_tasks:
+
+ # run before roles to prevent openshift_version breaking
+ - include: tasks/enable_repo.yml
+ vars: { repo_name: "ose-3.2" }
+
roles:
- openshift_health_checker
- tasks:
+
+ post_tasks:
- block:
+ # disable extras so we control docker version
- include: tasks/enable_repo.yml
- vars: { repo_name: "ose-3.2" }
+ vars: { repo_file: "CentOS-Base", repo_name: "extras", repo_enabled: 0 }
- action: openshift_health_check
args:
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml
index 5939a1ef1..c26413009 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_version_mismatches.yml
@@ -1,4 +1,5 @@
---
+# NOTE: this test is probably superfluous since openshift_version already does it
- include: ../../setup_container.yml
vars:
image: preflight-aos-package-checks
@@ -8,14 +9,24 @@
- name: Failure when AOS version doesn't match openshift_release
hosts: all
+ pre_tasks:
+
+ # run before roles to prevent openshift_version breaking
+ - include: tasks/enable_repo.yml
+ vars: { repo_name: "ose-3.3" }
+
roles:
- openshift_health_checker
- tasks:
+
+ post_tasks:
- block:
+ # put the repo back to disabled
+ - include: tasks/enable_repo.yml
+ vars: { repo_name: "ose-3.3", repo_enabled: 0 }
+ # test with wrong repo enabled
- include: tasks/enable_repo.yml
vars: { repo_name: "ose-3.2" }
-
- action: openshift_health_check
args:
checks: [ 'package_version' ]
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/package_version_multiple.yml b/test/integration/openshift_health_checker/preflight/playbooks/package_version_multiple.yml
index be0f9bc7a..850a55a72 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/package_version_multiple.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/package_version_multiple.yml
@@ -7,16 +7,25 @@
- name: Fails when multiple AOS versions are available
hosts: all
+ pre_tasks:
+
+ # run before roles to prevent openshift_version breaking
+ - include: tasks/enable_repo.yml
+ vars: { repo_name: "ose-3.2" }
+
roles:
- openshift_health_checker
- tasks:
+
+ post_tasks:
- block:
+ # enable repo with extra minor version available
- include: tasks/enable_repo.yml
- vars: { repo_name: "ose-3.2" }
+ vars: { repo_name: "ose-3.3" }
+ # disable extras so we control docker version
- include: tasks/enable_repo.yml
- vars: { repo_name: "ose-3.3" }
+ vars: { repo_file: "CentOS-Base", repo_name: "extras", repo_enabled: 0 }
- action: openshift_health_check
args:
diff --git a/test/integration/openshift_health_checker/preflight/playbooks/tasks/enable_repo.yml b/test/integration/openshift_health_checker/preflight/playbooks/tasks/enable_repo.yml
index aaacf205e..6022f4289 100644
--- a/test/integration/openshift_health_checker/preflight/playbooks/tasks/enable_repo.yml
+++ b/test/integration/openshift_health_checker/preflight/playbooks/tasks/enable_repo.yml
@@ -3,7 +3,7 @@
# believe it or not we can't use the yum_repository module for this.
# https://github.com/ansible/ansible-modules-extras/issues/2384
ini_file:
- dest: /etc/yum.repos.d/{{ repo_name }}.repo
+ dest: /etc/yum.repos.d/{{ repo_file | default(repo_name) }}.repo
section: "{{ repo_name }}"
option: enabled
- value: 1
+ value: "{{ repo_enabled | default(1) }}"
diff --git a/test/integration/openshift_health_checker/preflight/preflight_test.go b/test/integration/openshift_health_checker/preflight/preflight_test.go
index 05ddf139f..9dfd713ec 100644
--- a/test/integration/openshift_health_checker/preflight/preflight_test.go
+++ b/test/integration/openshift_health_checker/preflight/preflight_test.go
@@ -66,7 +66,7 @@ func TestPackageVersionMismatches(t *testing.T) {
ExitCode: 2,
Output: []string{
"check \"package_version\":",
- "Not all of the required packages are available at requested version",
+ "Not all of the required packages are available at their requested version",
},
}.Run(t)
}
diff --git a/test/integration/openshift_health_checker/teardown_container.yml b/test/integration/openshift_health_checker/teardown_container.yml
index fe11e2617..e84fee1f5 100644
--- a/test/integration/openshift_health_checker/teardown_container.yml
+++ b/test/integration/openshift_health_checker/teardown_container.yml
@@ -21,3 +21,4 @@
docker_container:
name: "{{ inventory_hostname }}"
state: absent
+ failed_when: False