-rw-r--r--  .tito/packages/openshift-ansible | 2
-rw-r--r--  docs/proposals/role_decomposition.md | 6
-rw-r--r--  files/origin-components/template-service-broker-registration.yaml | 4
-rw-r--r--  filter_plugins/oo_filters.py | 394
-rw-r--r--  images/installer/README_CONTAINER_IMAGE.md | 4
-rwxr-xr-x  images/installer/root/usr/local/bin/run | 5
-rw-r--r--  inventory/byo/hosts.example (renamed from inventory/byo/hosts.ose.example) | 211
-rw-r--r--  inventory/byo/hosts.origin.example | 899
-rw-r--r--  openshift-ansible.spec | 435
-rw-r--r--  playbooks/aws/BUILD_AMI.md | 21
-rw-r--r--  playbooks/aws/PREREQUISITES.md | 40
-rw-r--r--  playbooks/aws/README.md | 141
-rwxr-xr-x  playbooks/aws/openshift-cluster/accept.yml | 6
-rw-r--r--  playbooks/aws/openshift-cluster/build_ami.yml | 77
-rw-r--r--  playbooks/aws/openshift-cluster/install.yml | 63
-rw-r--r--  playbooks/aws/openshift-cluster/prerequisites.yml | 8
-rw-r--r--  playbooks/aws/openshift-cluster/provision.yml | 2
-rw-r--r--  playbooks/aws/openshift-cluster/provision_instance.yml | 12
-rw-r--r--  playbooks/aws/openshift-cluster/provision_sec_group.yml | 13
-rw-r--r--  playbooks/aws/openshift-cluster/provision_ssh_keypair.yml | 12
-rw-r--r--  playbooks/aws/openshift-cluster/provision_vpc.yml | 10
-rw-r--r--  playbooks/aws/openshift-cluster/provisioning_vars.example.yml | 28
-rw-r--r--  playbooks/aws/openshift-cluster/seal_ami.yml | 12
-rw-r--r--  playbooks/aws/provisioning-inventory.example.ini | 25
-rw-r--r--  playbooks/aws/provisioning_vars.yml.example | 120
-rw-r--r--  playbooks/byo/openshift-cfme/uninstall.yml | 6
-rw-r--r--  playbooks/byo/openshift-cluster/config.yml | 2
-rw-r--r--  playbooks/byo/openshift-etcd/embedded2external.yml | 6
-rw-r--r--  playbooks/byo/openshift-management/add_container_provider.yml | 6
-rw-r--r--  playbooks/byo/openshift-management/add_many_container_providers.yml | 36
-rw-r--r--  playbooks/byo/openshift-management/config.yml (renamed from playbooks/byo/openshift-cfme/config.yml) | 4
l---------  playbooks/byo/openshift-management/roles (renamed from playbooks/common/openshift-cfme/roles) | 0
-rw-r--r--  playbooks/byo/openshift-management/uninstall.yml | 2
-rw-r--r--  playbooks/byo/openshift-master/certificates.yml | 2
-rw-r--r--  playbooks/byo/openshift-node/scaleup.yml | 2
-rw-r--r--  playbooks/common/openshift-cfme/config.yml | 44
-rw-r--r--  playbooks/common/openshift-checks/adhoc.yml | 5
-rw-r--r--  playbooks/common/openshift-checks/health.yml | 6
-rw-r--r--  playbooks/common/openshift-checks/install.yml | 47
-rw-r--r--  playbooks/common/openshift-checks/pre-install.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/config.yml | 32
-rw-r--r--  playbooks/common/openshift-cluster/create_persistent_volumes.yml | 9
-rw-r--r--  playbooks/common/openshift-cluster/evaluate_groups.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/initialize_facts.yml | 15
-rw-r--r--  playbooks/common/openshift-cluster/initialize_openshift_version.yml | 15
-rw-r--r--  playbooks/common/openshift-cluster/openshift_default_storage_class.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/openshift_hosted.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/openshift_logging.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/openshift_metrics.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/openshift_prometheus.yml | 20
-rw-r--r--  playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml | 12
-rw-r--r--  playbooks/common/openshift-cluster/service_catalog.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/std_include.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/files/shared_resource_viewer_role.yaml | 37
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/post_control_plane.yml | 25
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml | 37
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml | 6
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml | 8
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml | 56
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml | 4
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml | 14
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml | 14
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml | 15
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml | 2
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml | 1
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml | 18
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml | 16
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml | 15
-rw-r--r--  playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml | 2
-rw-r--r--  playbooks/common/openshift-etcd/certificates.yml | 29
-rw-r--r--  playbooks/common/openshift-etcd/config.yml | 10
-rw-r--r--  playbooks/common/openshift-etcd/embedded2external.yml | 172
-rw-r--r--  playbooks/common/openshift-etcd/master_etcd_certificates.yml | 14
-rw-r--r--  playbooks/common/openshift-etcd/migrate.yml | 24
-rw-r--r--  playbooks/common/openshift-etcd/scaleup.yml | 4
-rw-r--r--  playbooks/common/openshift-etcd/server_certificates.yml | 15
-rw-r--r--  playbooks/common/openshift-glusterfs/config.yml | 16
-rw-r--r--  playbooks/common/openshift-loadbalancer/config.yml | 17
-rw-r--r--  playbooks/common/openshift-management/add_container_provider.yml | 8
-rw-r--r--  playbooks/common/openshift-management/config.yml | 35
l---------  playbooks/common/openshift-management/filter_plugins (renamed from playbooks/common/openshift-cfme/filter_plugins) | 0
l---------  playbooks/common/openshift-management/library (renamed from playbooks/common/openshift-cfme/library) | 0
l---------  playbooks/common/openshift-management/roles | 1
-rw-r--r--  playbooks/common/openshift-management/uninstall.yml (renamed from playbooks/common/openshift-cfme/uninstall.yml) | 4
-rw-r--r--  playbooks/common/openshift-master/additional_config.yml | 10
-rw-r--r--  playbooks/common/openshift-master/ca.yml | 8
-rw-r--r--  playbooks/common/openshift-master/config.yml | 15
-rw-r--r--  playbooks/common/openshift-master/scaleup.yml | 2
-rw-r--r--  playbooks/common/openshift-nfs/config.yml | 6
-rw-r--r--  playbooks/common/openshift-node/additional_config.yml | 14
-rw-r--r--  playbooks/common/openshift-node/clean_image.yml | 10
-rw-r--r--  playbooks/common/openshift-node/config.yml | 8
-rw-r--r--  playbooks/common/openshift-node/configure_nodes.yml | 1
-rw-r--r--  playbooks/common/openshift-node/image_prep.yml | 24
-rw-r--r--  roles/ansible_service_broker/defaults/main.yml | 5
-rw-r--r--  roles/ansible_service_broker/tasks/install.yml | 50
-rw-r--r--  roles/ansible_service_broker/tasks/remove.yml | 12
-rw-r--r--  roles/ansible_service_broker/vars/default_images.yml | 2
-rw-r--r--  roles/ansible_service_broker/vars/openshift-enterprise.yml | 5
-rw-r--r--  roles/docker/defaults/main.yml | 13
-rw-r--r--  roles/docker/handlers/main.yml | 1
-rw-r--r--  roles/docker/meta/main.yml | 1
-rw-r--r--  roles/docker/tasks/crio_firewall.yml | 40
-rw-r--r--  roles/docker/tasks/main.yml | 17
-rw-r--r--  roles/docker/tasks/package_docker.yml | 45
-rw-r--r--  roles/docker/tasks/registry_auth.yml | 16
-rw-r--r--  roles/docker/tasks/systemcontainer_crio.yml | 79
-rw-r--r--  roles/docker/tasks/systemcontainer_docker.yml | 57
-rw-r--r--  roles/docker/templates/crio.conf.j2 | 26
-rw-r--r--  roles/docker/templates/custom.conf.j2 | 6
-rw-r--r--  roles/etcd/defaults/main.yaml | 9
-rw-r--r--  roles/etcd/tasks/auxiliary/clean_data.yml | 2
-rw-r--r--  roles/etcd/tasks/auxiliary/disable_etcd.yml | 5
-rw-r--r--  roles/etcd/tasks/auxiliary/force_new_cluster.yml | 31
-rw-r--r--  roles/etcd/tasks/backup.archive.yml | 3
-rw-r--r--  roles/etcd/tasks/backup.copy.yml | 3
-rw-r--r--  roles/etcd/tasks/backup.fetch.yml | 3
-rw-r--r--  roles/etcd/tasks/backup.force_new_cluster.yml | 12
-rw-r--r--  roles/etcd/tasks/backup.unarchive.yml | 3
-rw-r--r--  roles/etcd/tasks/backup/archive.yml | 5
-rw-r--r--  roles/etcd/tasks/backup/backup.yml | 34
-rw-r--r--  roles/etcd/tasks/backup/copy.yml | 5
-rw-r--r--  roles/etcd/tasks/backup/fetch.yml | 8
-rw-r--r--  roles/etcd/tasks/backup/unarchive.yml | 14
-rw-r--r--  roles/etcd/tasks/backup/vars.yml | 15
-rw-r--r--  roles/etcd/tasks/backup_master_etcd_certificates.yml | 2
-rw-r--r--  roles/etcd/tasks/certificates/backup_master_etcd_certificates.yml | 7
-rw-r--r--  roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml | 21
-rw-r--r--  roles/etcd/tasks/check_cluster_health.yml | 2
-rw-r--r--  roles/etcd/tasks/disable_etcd.yml | 2
-rw-r--r--  roles/etcd/tasks/fetch_backup.yml | 8
-rw-r--r--  roles/etcd/tasks/migration/check.yml | 11
-rw-r--r--  roles/etcd/tasks/system_container.yml | 58
-rw-r--r--  roles/etcd/templates/etcd.conf.j2 | 4
-rw-r--r--  roles/flannel/handlers/main.yml | 9
-rw-r--r--  roles/installer_checkpoint/README.md | 3
-rw-r--r--  roles/installer_checkpoint/callback_plugins/installer_checkpoint.py | 22
-rw-r--r--  roles/kuryr/README.md | 38
-rw-r--r--  roles/kuryr/defaults/main.yaml | 72
-rw-r--r--  roles/kuryr/meta/main.yml | 17
-rw-r--r--  roles/kuryr/tasks/master.yaml | 52
-rw-r--r--  roles/kuryr/tasks/node.yaml | 48
-rw-r--r--  roles/kuryr/tasks/serviceaccount.yaml | 31
-rw-r--r--  roles/kuryr/templates/cni-daemonset.yaml.j2 | 53
-rw-r--r--  roles/kuryr/templates/configmap.yaml.j2 | 343
-rw-r--r--  roles/kuryr/templates/controller-deployment.yaml.j2 | 40
-rw-r--r--  roles/lib_openshift/library/oc_adm_csr.py | 16
-rw-r--r--  roles/lib_openshift/library/oc_secret.py | 2
-rw-r--r--  roles/lib_openshift/src/class/oc_adm_csr.py | 16
-rw-r--r--  roles/lib_openshift/src/class/oc_secret.py | 2
-rw-r--r--  roles/openshift_atomic/README.md | 28
-rw-r--r--  roles/openshift_atomic/meta/main.yml | 13
-rw-r--r--  roles/openshift_atomic/tasks/proxy.yml | 32
-rw-r--r--  roles/openshift_aws/README.md | 79
-rw-r--r--  roles/openshift_aws/defaults/main.yml | 23
-rw-r--r--  roles/openshift_aws/tasks/launch_config.yml | 23
-rw-r--r--  roles/openshift_aws/tasks/master_facts.yml | 22
-rw-r--r--  roles/openshift_aws/tasks/provision.yml | 8
-rw-r--r--  roles/openshift_aws/tasks/provision_instance.yml (renamed from roles/openshift_aws/tasks/build_ami.yml) | 39
-rw-r--r--  roles/openshift_aws/tasks/scale_group.yml | 2
-rw-r--r--  roles/openshift_aws/tasks/setup_master_group.yml | 35
-rw-r--r--  roles/openshift_aws/templates/user_data.j2 | 26
-rw-r--r--  roles/openshift_ca/tasks/main.yml | 4
-rw-r--r--  roles/openshift_cfme/README.md | 404
-rw-r--r--  roles/openshift_cfme/defaults/main.yml | 42
-rw-r--r--  roles/openshift_cfme/files/miq-template.yaml | 566
-rw-r--r--  roles/openshift_cfme/files/openshift_cfme.exports | 3
-rw-r--r--  roles/openshift_cfme/handlers/main.yml | 37
-rw-r--r--  roles/openshift_cfme/img/CFMEBasicDeployment.png | bin 38316 -> 0 bytes
-rw-r--r--  roles/openshift_cfme/tasks/create_pvs.yml | 36
-rw-r--r--  roles/openshift_cfme/tasks/main.yml | 117
-rw-r--r--  roles/openshift_cfme/tasks/nfs.yml | 51
-rw-r--r--  roles/openshift_cfme/tasks/tune_masters.yml | 12
-rw-r--r--  roles/openshift_cfme/tasks/uninstall.yml | 46
-rw-r--r--  roles/openshift_cfme/templates/miq-pv-db.yaml.j2 | 13
-rw-r--r--  roles/openshift_cfme/templates/miq-pv-region.yaml.j2 | 13
-rw-r--r--  roles/openshift_cfme/templates/miq-pv-server.yaml.j2 | 13
-rw-r--r--  roles/openshift_cli/tasks/main.yml | 13
-rw-r--r--  roles/openshift_default_storage_class/defaults/main.yml | 6
-rw-r--r--  roles/openshift_default_storage_class/tasks/main.yml | 2
-rw-r--r--  roles/openshift_etcd_facts/vars/main.yml | 1
-rwxr-xr-x  roles/openshift_examples/examples-sync.sh | 2
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/db-templates/mariadb-ephemeral-template.json | 8
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/db-templates/mariadb-persistent-template.json | 8
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/db-templates/mongodb-ephemeral-template.json | 8
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/db-templates/mongodb-persistent-template.json | 8
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/db-templates/mysql-ephemeral-template.json | 8
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/db-templates/mysql-persistent-template.json | 8
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/db-templates/postgresql-ephemeral-template.json | 8
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/db-templates/postgresql-persistent-template.json | 8
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/db-templates/redis-ephemeral-template.json | 8
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/db-templates/redis-persistent-template.json | 8
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/image-streams/image-streams-centos7.json | 76
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/image-streams/image-streams-rhel7.json | 71
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/quickstart-templates/cakephp-mysql-persistent.json | 13
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/quickstart-templates/cakephp-mysql.json | 13
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/quickstart-templates/dancer-mysql-persistent.json | 13
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/quickstart-templates/dancer-mysql.json | 13
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/quickstart-templates/django-postgresql-persistent.json | 11
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/quickstart-templates/django-postgresql.json | 11
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/quickstart-templates/httpd.json | 12
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/quickstart-templates/jenkins-ephemeral-template.json | 22
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/quickstart-templates/jenkins-persistent-template.json | 22
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/quickstart-templates/nodejs-mongodb-persistent.json | 11
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/quickstart-templates/nodejs-mongodb.json | 11
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/quickstart-templates/rails-postgresql-persistent.json | 11
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/quickstart-templates/rails-postgresql.json | 11
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-streams/jboss-image-streams.json | 104
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq62-basic.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq62-persistent-ssl.json | 120
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq62-persistent.json | 120
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq62-ssl.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq63-basic.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq63-persistent-ssl.json | 120
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq63-persistent.json | 120
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq63-ssl.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-basic.json | 372
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-https.json | 550
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-mysql-persistent.json | 852
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-mysql.json | 811
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-postgresql-persistent.json | 824
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-postgresql.json | 783
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-amq-persistent-s2i.json | 872
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-amq-s2i.json | 817
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-basic-s2i.json | 389
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-https-s2i.json | 585
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-mongodb-persistent-s2i.json | 862
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-mongodb-s2i.json | 821
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-mysql-persistent-s2i.json | 878
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-mysql-s2i.json | 837
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-postgresql-persistent-s2i.json | 852
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-postgresql-s2i.json | 811
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-sso-s2i.json | 823
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-third-party-db-s2i.json | 657
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-templates/karaf2-camel-amq-template.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-templates/karaf2-camel-log-template.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-templates/karaf2-camel-rest-sql-template.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-templates/karaf2-cxf-rest-template.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-amq-template.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-config-template.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-drools-template.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-infinispan-template.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-rest-sql-template.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-teiid-template.json | 6
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-template.json | 4
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-xml-template.json | 4
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-cxf-jaxrs-template.json | 63
-rw-r--r--  roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-cxf-jaxws-template.json | 63
-rw-r--r--  roles/openshift_excluder/tasks/install.yml | 31
-rwxr-xr-x  roles/openshift_facts/library/openshift_facts.py | 78
-rw-r--r--  roles/openshift_gcp/templates/provision.j2.sh | 11
-rw-r--r--  roles/openshift_gcp/templates/remove.j2.sh | 26
-rw-r--r--  roles/openshift_health_checker/action_plugins/openshift_health_check.py | 3
-rw-r--r--  roles/openshift_health_checker/openshift_checks/__init__.py | 7
-rw-r--r--  roles/openshift_health_checker/openshift_checks/disk_availability.py | 43
-rw-r--r--  roles/openshift_health_checker/openshift_checks/docker_image_availability.py | 122
-rw-r--r--  roles/openshift_health_checker/openshift_checks/docker_storage.py | 2
-rw-r--r--  roles/openshift_health_checker/openshift_checks/etcd_traffic.py | 2
-rw-r--r--  roles/openshift_health_checker/openshift_checks/etcd_volume.py | 6
-rw-r--r--  roles/openshift_health_checker/openshift_checks/logging/fluentd_config.py | 2
-rw-r--r--  roles/openshift_health_checker/openshift_checks/memory_availability.py | 6
-rw-r--r--  roles/openshift_health_checker/openshift_checks/mixins.py | 8
-rw-r--r--  roles/openshift_health_checker/openshift_checks/ovs_version.py | 2
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_availability.py | 4
-rw-r--r--  roles/openshift_health_checker/openshift_checks/package_version.py | 2
-rw-r--r--  roles/openshift_health_checker/test/action_plugin_test.py | 1
-rw-r--r--  roles/openshift_health_checker/test/disk_availability_test.py | 63
-rw-r--r--  roles/openshift_health_checker/test/docker_image_availability_test.py | 67
-rw-r--r--  roles/openshift_health_checker/test/docker_storage_test.py | 6
-rw-r--r--  roles/openshift_health_checker/test/etcd_traffic_test.py | 22
-rw-r--r--  roles/openshift_health_checker/test/fluentd_config_test.py | 10
-rw-r--r--  roles/openshift_health_checker/test/memory_availability_test.py | 36
-rw-r--r--  roles/openshift_health_checker/test/ovs_version_test.py | 12
-rw-r--r--  roles/openshift_health_checker/test/package_availability_test.py | 6
-rw-r--r--  roles/openshift_health_checker/test/package_version_test.py | 12
-rw-r--r--  roles/openshift_hosted/defaults/main.yml | 1
-rw-r--r--  roles/openshift_hosted/templates/registry_config.j2 | 4
-rw-r--r--  roles/openshift_hosted_facts/tasks/main.yml | 2
-rw-r--r--  roles/openshift_logging/README.md | 98
-rw-r--r--  roles/openshift_logging/defaults/main.yml | 40
-rw-r--r--  roles/openshift_logging/filter_plugins/openshift_logging.py | 63
-rw-r--r--  roles/openshift_logging/filter_plugins/test | 49
-rw-r--r--  roles/openshift_logging/library/openshift_logging_facts.py | 13
-rw-r--r--  roles/openshift_logging/tasks/delete_logging.yaml | 1
-rw-r--r--  roles/openshift_logging/tasks/generate_certs.yaml | 4
-rw-r--r--  roles/openshift_logging/tasks/install_logging.yaml | 53
-rw-r--r--  roles/openshift_logging/templates/jks_pod.j2 | 2
-rw-r--r--  roles/openshift_logging_curator/defaults/main.yml | 5
-rw-r--r--  roles/openshift_logging_curator/tasks/main.yaml | 1
-rw-r--r--  roles/openshift_logging_curator/templates/curator.j2 | 17
-rw-r--r--  roles/openshift_logging_elasticsearch/defaults/main.yml | 9
-rw-r--r--  roles/openshift_logging_elasticsearch/files/es_migration.sh | 79
-rw-r--r--  roles/openshift_logging_elasticsearch/tasks/main.yaml | 242
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/es.j2 | 56
-rw-r--r--  roles/openshift_logging_elasticsearch/templates/logging-metrics-role.j2 | 31
-rw-r--r--  roles/openshift_logging_elasticsearch/vars/default_images.yml | 3
-rw-r--r--  roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml | 3
-rw-r--r--  roles/openshift_logging_eventrouter/README.md | 6
-rw-r--r--  roles/openshift_logging_eventrouter/defaults/main.yaml | 3
-rw-r--r--  roles/openshift_logging_eventrouter/files/eventrouter-template.yaml | 2
-rw-r--r--  roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml | 2
-rw-r--r--  roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 | 12
-rw-r--r--  roles/openshift_logging_fluentd/defaults/main.yml | 9
-rw-r--r--  roles/openshift_logging_fluentd/tasks/main.yaml | 8
-rw-r--r--  roles/openshift_logging_fluentd/templates/fluentd.j2 | 59
-rw-r--r--  roles/openshift_logging_kibana/defaults/main.yml | 2
-rw-r--r--  roles/openshift_logging_kibana/tasks/main.yaml | 2
-rw-r--r--  roles/openshift_logging_kibana/templates/kibana.j2 | 42
-rw-r--r--  roles/openshift_logging_mux/defaults/main.yml | 13
-rw-r--r--  roles/openshift_logging_mux/tasks/main.yaml | 1
-rw-r--r--  roles/openshift_logging_mux/templates/mux.j2 | 21
-rw-r--r--  roles/openshift_manageiq/tasks/main.yaml | 5
-rw-r--r--  roles/openshift_management/README.md | 582
-rw-r--r--  roles/openshift_management/defaults/main.yml | 104
-rw-r--r--  roles/openshift_management/files/examples/container_providers.yml | 22
-rw-r--r--  roles/openshift_management/files/templates/cloudforms/cfme-backup-job.yaml | 28
-rw-r--r--  roles/openshift_management/files/templates/cloudforms/cfme-backup-pvc.yaml | 10
-rw-r--r--  roles/openshift_management/files/templates/cloudforms/cfme-pv-backup-example.yaml | 13
-rw-r--r--  roles/openshift_management/files/templates/cloudforms/cfme-pv-db-example.yaml | 38
-rw-r--r--  roles/openshift_management/files/templates/cloudforms/cfme-pv-server-example.yaml | 38
-rw-r--r--  roles/openshift_management/files/templates/cloudforms/cfme-restore-job.yaml | 35
-rw-r--r--  roles/openshift_management/files/templates/cloudforms/cfme-scc-sysadmin.yaml | 38
-rw-r--r--  roles/openshift_management/files/templates/cloudforms/cfme-template-ext-db.yaml | 763
-rw-r--r--  roles/openshift_management/files/templates/cloudforms/cfme-template.yaml | 940
-rw-r--r--  roles/openshift_management/files/templates/manageiq/miq-backup-job.yaml | 28
-rw-r--r--  roles/openshift_management/files/templates/manageiq/miq-backup-pvc.yaml | 10
-rw-r--r--  roles/openshift_management/files/templates/manageiq/miq-pv-backup-example.yaml | 13
-rw-r--r--  roles/openshift_management/files/templates/manageiq/miq-pv-db-example.yaml | 38
-rw-r--r--  roles/openshift_management/files/templates/manageiq/miq-pv-server-example.yaml | 38
-rw-r--r--  roles/openshift_management/files/templates/manageiq/miq-restore-job.yaml | 35
-rw-r--r--  roles/openshift_management/files/templates/manageiq/miq-template-ext-db.yaml | 771
-rw-r--r--  roles/openshift_management/files/templates/manageiq/miq-template.yaml | 948
-rw-r--r--  roles/openshift_management/filter_plugins/oo_management_filters.py | 32
-rw-r--r--  roles/openshift_management/handlers/main.yml | 0
-rw-r--r--  roles/openshift_management/meta/main.yml (renamed from roles/openshift_cfme/meta/main.yml) | 1
-rw-r--r--  roles/openshift_management/tasks/accounts.yml | 28
-rw-r--r--  roles/openshift_management/tasks/add_container_provider.yml | 65
-rw-r--r--  roles/openshift_management/tasks/main.yml | 96
-rw-r--r--  roles/openshift_management/tasks/noop.yml | 1
-rw-r--r--  roles/openshift_management/tasks/storage/create_nfs_pvs.yml | 69
-rw-r--r--  roles/openshift_management/tasks/storage/nfs.yml | 36
-rw-r--r--  roles/openshift_management/tasks/storage/nfs_server.yml | 45
-rw-r--r--  roles/openshift_management/tasks/storage/storage.yml | 3
-rw-r--r--  roles/openshift_management/tasks/template.yml | 128
-rw-r--r--  roles/openshift_management/tasks/uninstall.yml | 23
-rw-r--r--  roles/openshift_management/tasks/validate.yml | 90
-rw-r--r--  roles/openshift_management/vars/main.yml | 76
-rw-r--r--  roles/openshift_master/defaults/main.yml | 103
-rw-r--r--  roles/openshift_master/meta/main.yml | 1
-rw-r--r--  roles/openshift_master/tasks/bootstrap.yml | 84
-rw-r--r--  roles/openshift_master/tasks/check_master_api_is_ready.yml | 14
-rw-r--r--  roles/openshift_master/tasks/clean_systemd_units.yml | 9
-rw-r--r--  roles/openshift_master/tasks/configure_external_etcd.yml | 17
-rw-r--r--  roles/openshift_master/tasks/journald.yml | 29
-rw-r--r--  roles/openshift_master/tasks/main.yml | 73
-rw-r--r--  roles/openshift_master/tasks/registry_auth.yml | 13
-rw-r--r--  roles/openshift_master/tasks/system_container.yml | 5
-rw-r--r--  roles/openshift_master/tasks/systemd_units.yml | 49
-rw-r--r--  roles/openshift_master/tasks/upgrade_facts.yml | 37
-rw-r--r--  roles/openshift_master/templates/atomic-openshift-master.j2 | 4
-rw-r--r--  roles/openshift_master/templates/master.yaml.v1.j2 | 25
-rw-r--r--  roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 | 2
-rw-r--r--  roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 | 2
-rw-r--r--  roles/openshift_master_facts/filter_plugins/openshift_master.py | 1
-rw-r--r--  roles/openshift_master_facts/tasks/main.yml | 2
-rw-r--r--  roles/openshift_metrics/README.md | 75
-rw-r--r--  roles/openshift_metrics/tasks/install_cassandra.yaml | 1
-rw-r--r--  roles/openshift_metrics/templates/hawkular_cassandra_rc.j2 | 2
-rw-r--r--  roles/openshift_metrics/templates/hawkular_metrics_rc.j2 | 2
-rw-r--r--  roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j2 | 2
-rw-r--r--  roles/openshift_metrics/templates/heapster.j2 | 2
-rw-r--r--  roles/openshift_named_certificates/tasks/named_certificates.yml | 32
-rw-r--r--  roles/openshift_nfs/README.md | 17
-rw-r--r--  roles/openshift_nfs/defaults/main.yml | 8
-rw-r--r--  roles/openshift_nfs/meta/main.yml | 16
-rw-r--r--  roles/openshift_nfs/tasks/create_export.yml | 34
-rw-r--r--  roles/openshift_nfs/tasks/firewall.yml | 40
-rw-r--r--  roles/openshift_nfs/tasks/setup.yml | 29
-rw-r--r--  roles/openshift_node/defaults/main.yml | 17
-rw-r--r--  roles/openshift_node/files/bootstrap.yml | 63
-rw-r--r--  roles/openshift_node/handlers/main.yml | 11
-rw-r--r--  roles/openshift_node/tasks/aws.yml | 21
-rw-r--r--  roles/openshift_node/tasks/bootstrap.yml | 59
-rw-r--r--  roles/openshift_node/tasks/config.yml | 68
-rw-r--r--  roles/openshift_node/tasks/config/configure-node-settings.yml | 2
-rw-r--r--  roles/openshift_node/tasks/config/install-node-docker-service-file.yml | 8
-rw-r--r--  roles/openshift_node/tasks/install.yml | 6
-rw-r--r--  roles/openshift_node/tasks/main.yml | 9
-rw-r--r--  roles/openshift_node/tasks/node_system_container.yml | 5
-rw-r--r--  roles/openshift_node/tasks/openvswitch_system_container.yml | 5
-rw-r--r--  roles/openshift_node/tasks/registry_auth.yml | 3
-rw-r--r--  roles/openshift_node/tasks/systemd_units.yml | 6
-rw-r--r--  roles/openshift_node/templates/node.service.j2 | 6
-rw-r--r--  roles/openshift_node/templates/node.yaml.v1.j2 | 6
-rw-r--r--  roles/openshift_node/templates/openshift.docker.node.service | 1
-rw-r--r--  roles/openshift_node_certificates/handlers/main.yml | 16
-rw-r--r--  roles/openshift_node_dnsmasq/README.md | 27
-rw-r--r--  roles/openshift_node_dnsmasq/defaults/main.yml | 6
-rwxr-xr-x  roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh | 8
-rw-r--r--  roles/openshift_node_dnsmasq/tasks/network-manager.yml | 1
-rw-r--r--  roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 | 9
-rw-r--r--  roles/openshift_node_facts/tasks/main.yml | 1
-rw-r--r--  roles/openshift_node_upgrade/README.md | 1
-rw-r--r--  roles/openshift_node_upgrade/defaults/main.yml | 2
-rw-r--r--  roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml | 2
-rw-r--r--  roles/openshift_node_upgrade/tasks/registry_auth.yml | 3
-rw-r--r--  roles/openshift_node_upgrade/tasks/systemd_units.yml | 2
-rw-r--r--  roles/openshift_node_upgrade/templates/openshift.docker.node.service | 1
-rw-r--r--  roles/openshift_prometheus/README.md | 35
-rw-r--r--  roles/openshift_prometheus/defaults/main.yaml | 56
-rw-r--r--  roles/openshift_prometheus/files/openshift_prometheus.exports | 3
-rw-r--r--  roles/openshift_prometheus/tasks/create_pvs.yaml | 36
-rw-r--r--  roles/openshift_prometheus/tasks/install_prometheus.yaml | 24
-rw-r--r--  roles/openshift_prometheus/tasks/nfs.yaml | 44
-rw-r--r--  roles/openshift_prometheus/templates/prom-pv-alertbuffer.yml.j2 | 15
-rw-r--r--  roles/openshift_prometheus/templates/prom-pv-alertmanager.yml.j2 | 15
-rw-r--r--  roles/openshift_prometheus/templates/prom-pv-server.yml.j2 | 15
-rw-r--r--  roles/openshift_prometheus/templates/prometheus.j2 (renamed from roles/openshift_prometheus/templates/prometheus_deployment.j2) | 25
-rw-r--r--  roles/openshift_provisioners/tasks/generate_clusterrolebindings.yaml | 4
-rw-r--r--  roles/openshift_provisioners/tasks/generate_secrets.yaml | 4
-rw-r--r--  roles/openshift_provisioners/tasks/generate_serviceaccounts.yaml | 4
-rw-r--r--  roles/openshift_provisioners/tasks/install_efs.yaml | 8
-rw-r--r--  roles/openshift_provisioners/tasks/install_support.yaml | 17
-rw-r--r--  roles/openshift_provisioners/templates/pv.j2 | 1
-rw-r--r--  roles/openshift_provisioners/templates/pvc.j2 | 1
-rw-r--r--  roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml | 12
-rw-r--r--  roles/openshift_sanitize_inventory/tasks/main.yml | 17
-rw-r--r--  roles/openshift_sanitize_inventory/tasks/unsupported.yml | 8
-rw-r--r--  roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml | 112
-rw-r--r--  roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml | 18
-rw-r--r--  roles/openshift_service_catalog/tasks/generate_certs.yml | 27
-rw-r--r--  roles/openshift_service_catalog/tasks/install.yml | 45
-rw-r--r--  roles/openshift_service_catalog/tasks/remove.yml | 28
-rw-r--r--  roles/openshift_service_catalog/templates/api_server.j2 | 7
-rw-r--r--  roles/openshift_service_catalog/templates/controller_manager.j2 | 22
-rw-r--r--  roles/openshift_service_catalog/templates/sc_admin_edit_role_patching.j2 (renamed from roles/openshift_service_catalog/templates/sc_role_patching.j2) | 4
-rw-r--r--  roles/openshift_service_catalog/templates/sc_view_role_patching.j2 | 11
-rw-r--r--  roles/openshift_service_catalog/vars/openshift-enterprise.yml | 2
-rw-r--r--  roles/openshift_storage_glusterfs/README.md | 14
-rw-r--r--  roles/openshift_storage_glusterfs/files/v1.5/deploy-heketi-template.yml | 135
-rw-r--r--  roles/openshift_storage_glusterfs/files/v1.5/glusterfs-template.yml | 136
-rw-r--r--  roles/openshift_storage_glusterfs/files/v1.5/heketi-template.yml | 134
-rw-r--r--  roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml | 8
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml | 2
-rw-r--r--  roles/openshift_storage_glusterfs/tasks/kernel_modules.yml | 12
-rw-r--r--  roles/openshift_storage_glusterfs/templates/glusterfs.conf | 4
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-endpoints.yml.j2 | 12
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-service.yml.j2 | 10
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2 | 13
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v1.5/heketi-endpoints.yml.j2 | 12
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v1.5/heketi-service.yml.j2 | 10
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v1.5/heketi.json.j2 | 36
-rw-r--r--  roles/openshift_storage_glusterfs/templates/v1.5/topology.json.j2 | 49
-rw-r--r--  roles/openshift_storage_nfs/tasks/main.yml | 3
-rw-r--r--  roles/openshift_storage_nfs/templates/exports.j2 | 3
-rw-r--r--  roles/openshift_version/defaults/main.yml | 1
-rw-r--r--  roles/openshift_version/tasks/main.yml | 10
-rw-r--r--  roles/template_service_broker/defaults/main.yml | 1
-rw-r--r--  roles/template_service_broker/files/openshift-ansible-catalog-console.js | 2
-rw-r--r--  roles/template_service_broker/tasks/install.yml | 20
-rw-r--r--  roles/template_service_broker/tasks/remove.yml | 4
-rw-r--r--  roles/tuned/defaults/main.yml | 3
-rw-r--r--  roles/tuned/meta/main.yml | 13
-rw-r--r--  roles/tuned/tasks/main.yml (renamed from roles/openshift_node/tasks/tuned.yml) | 2
-rw-r--r--  roles/tuned/templates/openshift-control-plane/tuned.conf (renamed from roles/openshift_node/templates/tuned/openshift-control-plane/tuned.conf) | 0
-rw-r--r--  roles/tuned/templates/openshift-node/tuned.conf (renamed from roles/openshift_node/templates/tuned/openshift-node/tuned.conf) | 0
-rw-r--r--  roles/tuned/templates/openshift/tuned.conf (renamed from roles/openshift_node/templates/tuned/openshift/tuned.conf) | 0
-rw-r--r--  roles/tuned/templates/recommend.conf (renamed from roles/openshift_node/templates/tuned/recommend.conf) | 9
-rw-r--r--  utils/src/ooinstall/oo_config.py | 6
474 files changed, 25747 insertions, 4680 deletions
diff --git a/.tito/packages/openshift-ansible b/.tito/packages/openshift-ansible
index 9dcd067e5..103569fc1 100644
--- a/.tito/packages/openshift-ansible
+++ b/.tito/packages/openshift-ansible
@@ -1 +1 @@
-3.7.0-0.143.0 ./
+3.7.0-0.190.0 ./
diff --git a/docs/proposals/role_decomposition.md b/docs/proposals/role_decomposition.md
index b6c1d8c5b..6434e24e7 100644
--- a/docs/proposals/role_decomposition.md
+++ b/docs/proposals/role_decomposition.md
@@ -158,13 +158,13 @@ providing the location of the generated certificates to the individual roles.
openshift_logging_kibana_es_host: "{{ openshift_logging_es_ops_host }}"
openshift_logging_kibana_es_port: "{{ openshift_logging_es_ops_port }}"
openshift_logging_kibana_nodeselector: "{{ openshift_logging_kibana_ops_nodeselector }}"
- openshift_logging_kibana_cpu_limit: "{{ openshift_logging_kibana_ops_cpu_limit }}"
openshift_logging_kibana_memory_limit: "{{ openshift_logging_kibana_ops_memory_limit }}"
+ openshift_logging_kibana_cpu_request: "{{ openshift_logging_kibana_ops_cpu_request }}"
openshift_logging_kibana_hostname: "{{ openshift_logging_kibana_ops_hostname }}"
openshift_logging_kibana_replicas: "{{ openshift_logging_kibana_ops_replica_count }}"
openshift_logging_kibana_proxy_debug: "{{ openshift_logging_kibana_ops_proxy_debug }}"
- openshift_logging_kibana_proxy_cpu_limit: "{{ openshift_logging_kibana_ops_proxy_cpu_limit }}"
openshift_logging_kibana_proxy_memory_limit: "{{ openshift_logging_kibana_ops_proxy_memory_limit }}"
+ openshift_logging_kibana_proxy_cpu_request: "{{ openshift_logging_kibana_ops_proxy_cpu_request }}"
openshift_logging_kibana_cert: "{{ openshift_logging_kibana_ops_cert }}"
openshift_logging_kibana_key: "{{ openshift_logging_kibana_ops_key }}"
openshift_logging_kibana_ca: "{{ openshift_logging_kibana_ops_ca}}"
@@ -193,8 +193,8 @@ providing the location of the generated certificates to the individual roles.
openshift_logging_curator_image_prefix: "{{ openshift_logging_image_prefix }}"
openshift_logging_curator_image_version: "{{ openshift_logging_image_version }}"
openshift_logging_curator_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
- openshift_logging_curator_cpu_limit: "{{ openshift_logging_curator_ops_cpu_limit }}"
openshift_logging_curator_memory_limit: "{{ openshift_logging_curator_ops_memory_limit }}"
+ openshift_logging_curator_cpu_request: "{{ openshift_logging_curator_ops_cpu_request }}"
openshift_logging_curator_nodeselector: "{{ openshift_logging_curator_ops_nodeselector }}"
when:
- openshift_logging_use_ops | bool
diff --git a/files/origin-components/template-service-broker-registration.yaml b/files/origin-components/template-service-broker-registration.yaml
index 2086978f0..95fb72924 100644
--- a/files/origin-components/template-service-broker-registration.yaml
+++ b/files/origin-components/template-service-broker-registration.yaml
@@ -9,8 +9,8 @@ parameters:
required: true
objects:
# register the tsb with the service catalog
-- apiVersion: servicecatalog.k8s.io/v1alpha1
- kind: ServiceBroker
+- apiVersion: servicecatalog.k8s.io/v1beta1
+ kind: ClusterServiceBroker
metadata:
name: template-service-broker
spec:
diff --git a/filter_plugins/oo_filters.py b/filter_plugins/oo_filters.py
index 83a05370a..f9564499d 100644
--- a/filter_plugins/oo_filters.py
+++ b/filter_plugins/oo_filters.py
@@ -710,8 +710,8 @@ def oo_openshift_env(hostvars):
return facts
-# pylint: disable=too-many-branches, too-many-nested-blocks, too-many-statements
-def oo_component_persistent_volumes(hostvars, groups, component):
+# pylint: disable=too-many-branches, too-many-nested-blocks, too-many-statements, too-many-locals
+def oo_component_persistent_volumes(hostvars, groups, component, subcomponent=None):
""" Generate list of persistent volumes based on oo_openshift_env
storage options set in host variables for a specific component.
"""
@@ -723,84 +723,90 @@ def oo_component_persistent_volumes(hostvars, groups, component):
persistent_volume = None
if component in hostvars['openshift']:
- if 'storage' in hostvars['openshift'][component]:
- params = hostvars['openshift'][component]['storage']
+ if subcomponent is not None:
+ storage_component = hostvars['openshift'][component][subcomponent]
+ else:
+ storage_component = hostvars['openshift'][component]
+
+ if 'storage' in storage_component:
+ params = storage_component['storage']
kind = params['kind']
- create_pv = params['create_pv']
- if kind is not None and create_pv:
- if kind == 'nfs':
- host = params['host']
- if host is None:
- if 'oo_nfs_to_config' in groups and len(groups['oo_nfs_to_config']) > 0:
- host = groups['oo_nfs_to_config'][0]
+ if 'create_pv' in params:
+ create_pv = params['create_pv']
+ if kind is not None and create_pv:
+ if kind == 'nfs':
+ host = params['host']
+ if host is None:
+ if 'oo_nfs_to_config' in groups and len(groups['oo_nfs_to_config']) > 0:
+ host = groups['oo_nfs_to_config'][0]
+ else:
+ raise errors.AnsibleFilterError("|failed no storage host detected")
+ directory = params['nfs']['directory']
+ volume = params['volume']['name']
+ path = directory + '/' + volume
+ size = params['volume']['size']
+ if 'labels' in params:
+ labels = params['labels']
else:
- raise errors.AnsibleFilterError("|failed no storage host detected")
- directory = params['nfs']['directory']
- volume = params['volume']['name']
- path = directory + '/' + volume
- size = params['volume']['size']
- if 'labels' in params:
- labels = params['labels']
- else:
- labels = dict()
- access_modes = params['access']['modes']
- persistent_volume = dict(
- name="{0}-volume".format(volume),
- capacity=size,
- labels=labels,
- access_modes=access_modes,
- storage=dict(
- nfs=dict(
- server=host,
- path=path)))
-
- elif kind == 'openstack':
- volume = params['volume']['name']
- size = params['volume']['size']
- if 'labels' in params:
- labels = params['labels']
- else:
- labels = dict()
- access_modes = params['access']['modes']
- filesystem = params['openstack']['filesystem']
- volume_id = params['openstack']['volumeID']
- persistent_volume = dict(
- name="{0}-volume".format(volume),
- capacity=size,
- labels=labels,
- access_modes=access_modes,
- storage=dict(
- cinder=dict(
- fsType=filesystem,
- volumeID=volume_id)))
-
- elif kind == 'glusterfs':
- volume = params['volume']['name']
- size = params['volume']['size']
- if 'labels' in params:
- labels = params['labels']
- else:
- labels = dict()
- access_modes = params['access']['modes']
- endpoints = params['glusterfs']['endpoints']
- path = params['glusterfs']['path']
- read_only = params['glusterfs']['readOnly']
- persistent_volume = dict(
- name="{0}-volume".format(volume),
- capacity=size,
- labels=labels,
- access_modes=access_modes,
- storage=dict(
- glusterfs=dict(
- endpoints=endpoints,
- path=path,
- readOnly=read_only)))
-
- elif not (kind == 'object' or kind == 'dynamic'):
- msg = "|failed invalid storage kind '{0}' for component '{1}'".format(
- kind,
- component)
- raise errors.AnsibleFilterError(msg)
+ labels = dict()
+ access_modes = params['access']['modes']
+ persistent_volume = dict(
+ name="{0}-volume".format(volume),
+ capacity=size,
+ labels=labels,
+ access_modes=access_modes,
+ storage=dict(
+ nfs=dict(
+ server=host,
+ path=path)))
+
+ elif kind == 'openstack':
+ volume = params['volume']['name']
+ size = params['volume']['size']
+ if 'labels' in params:
+ labels = params['labels']
+ else:
+ labels = dict()
+ access_modes = params['access']['modes']
+ filesystem = params['openstack']['filesystem']
+ volume_id = params['openstack']['volumeID']
+ persistent_volume = dict(
+ name="{0}-volume".format(volume),
+ capacity=size,
+ labels=labels,
+ access_modes=access_modes,
+ storage=dict(
+ cinder=dict(
+ fsType=filesystem,
+ volumeID=volume_id)))
+
+ elif kind == 'glusterfs':
+ volume = params['volume']['name']
+ size = params['volume']['size']
+ if 'labels' in params:
+ labels = params['labels']
+ else:
+ labels = dict()
+ access_modes = params['access']['modes']
+ endpoints = params['glusterfs']['endpoints']
+ path = params['glusterfs']['path']
+ read_only = params['glusterfs']['readOnly']
+ persistent_volume = dict(
+ name="{0}-volume".format(volume),
+ capacity=size,
+ labels=labels,
+ access_modes=access_modes,
+ storage=dict(
+ glusterfs=dict(
+ endpoints=endpoints,
+ path=path,
+ readOnly=read_only)))
+
+ elif not (kind == 'object' or kind == 'dynamic'):
+ msg = "|failed invalid storage kind '{0}' for component '{1}'".format(
+ kind,
+ component)
+ raise errors.AnsibleFilterError(msg)
return persistent_volume
@@ -820,85 +826,10 @@ def oo_persistent_volumes(hostvars, groups, persistent_volumes=None):
persistent_volumes = []
if 'hosted' in hostvars['openshift']:
for component in hostvars['openshift']['hosted']:
- if 'storage' in hostvars['openshift']['hosted'][component]:
- params = hostvars['openshift']['hosted'][component]['storage']
- kind = params['kind']
- if 'create_pv' in params:
- create_pv = params['create_pv']
- if kind is not None and create_pv:
- if kind == 'nfs':
- host = params['host']
- if host is None:
- if 'oo_nfs_to_config' in groups and len(groups['oo_nfs_to_config']) > 0:
- host = groups['oo_nfs_to_config'][0]
- else:
- raise errors.AnsibleFilterError("|failed no storage host detected")
- directory = params['nfs']['directory']
- volume = params['volume']['name']
- path = directory + '/' + volume
- size = params['volume']['size']
- if 'labels' in params:
- labels = params['labels']
- else:
- labels = dict()
- access_modes = params['access']['modes']
- persistent_volume = dict(
- name="{0}-volume".format(volume),
- capacity=size,
- labels=labels,
- access_modes=access_modes,
- storage=dict(
- nfs=dict(
- server=host,
- path=path)))
- persistent_volumes.append(persistent_volume)
- elif kind == 'openstack':
- volume = params['volume']['name']
- size = params['volume']['size']
- if 'labels' in params:
- labels = params['labels']
- else:
- labels = dict()
- access_modes = params['access']['modes']
- filesystem = params['openstack']['filesystem']
- volume_id = params['openstack']['volumeID']
- persistent_volume = dict(
- name="{0}-volume".format(volume),
- capacity=size,
- labels=labels,
- access_modes=access_modes,
- storage=dict(
- cinder=dict(
- fsType=filesystem,
- volumeID=volume_id)))
- persistent_volumes.append(persistent_volume)
- elif kind == 'glusterfs':
- volume = params['volume']['name']
- size = params['volume']['size']
- if 'labels' in params:
- labels = params['labels']
- else:
- labels = dict()
- access_modes = params['access']['modes']
- endpoints = params['glusterfs']['endpoints']
- path = params['glusterfs']['path']
- read_only = params['glusterfs']['readOnly']
- persistent_volume = dict(
- name="{0}-volume".format(volume),
- capacity=size,
- labels=labels,
- access_modes=access_modes,
- storage=dict(
- glusterfs=dict(
- endpoints=endpoints,
- path=path,
- readOnly=read_only)))
- persistent_volumes.append(persistent_volume)
- elif not (kind == 'object' or kind == 'dynamic'):
- msg = "|failed invalid storage kind '{0}' for component '{1}'".format(
- kind,
- component)
- raise errors.AnsibleFilterError(msg)
+ persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'hosted', component)
+ if persistent_volume is not None:
+ persistent_volumes.append(persistent_volume)
+
if 'logging' in hostvars['openshift']:
persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'logging')
if persistent_volume is not None:
@@ -911,10 +842,22 @@ def oo_persistent_volumes(hostvars, groups, persistent_volumes=None):
persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'metrics')
if persistent_volume is not None:
persistent_volumes.append(persistent_volume)
+ if 'prometheus' in hostvars['openshift']:
+ persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'prometheus')
+ if persistent_volume is not None:
+ persistent_volumes.append(persistent_volume)
+ if 'alertmanager' in hostvars['openshift']['prometheus']:
+ persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'prometheus', 'alertmanager')
+ if persistent_volume is not None:
+ persistent_volumes.append(persistent_volume)
+ if 'alertbuffer' in hostvars['openshift']['prometheus']:
+ persistent_volume = oo_component_persistent_volumes(hostvars, groups, 'prometheus', 'alertbuffer')
+ if persistent_volume is not None:
+ persistent_volumes.append(persistent_volume)
return persistent_volumes
-def oo_component_pv_claims(hostvars, component):
+def oo_component_pv_claims(hostvars, component, subcomponent=None):
""" Generate list of persistent volume claims based on oo_openshift_env
storage options set in host variables for a specific component.
"""
@@ -922,20 +865,27 @@ def oo_component_pv_claims(hostvars, component):
raise errors.AnsibleFilterError("|failed expects hostvars is a dict")
if component in hostvars['openshift']:
- if 'storage' in hostvars['openshift'][component]:
- params = hostvars['openshift'][component]['storage']
+ if subcomponent is not None:
+ storage_component = hostvars['openshift'][component][subcomponent]
+ else:
+ storage_component = hostvars['openshift'][component]
+
+ if 'storage' in storage_component:
+ params = storage_component['storage']
kind = params['kind']
- create_pv = params['create_pv']
- create_pvc = params['create_pvc']
- if kind not in [None, 'object'] and create_pv and create_pvc:
- volume = params['volume']['name']
- size = params['volume']['size']
- access_modes = params['access']['modes']
- persistent_volume_claim = dict(
- name="{0}-claim".format(volume),
- capacity=size,
- access_modes=access_modes)
- return persistent_volume_claim
+ if 'create_pv' in params:
+ if 'create_pvc' in params:
+ create_pv = params['create_pv']
+ create_pvc = params['create_pvc']
+ if kind not in [None, 'object'] and create_pv and create_pvc:
+ volume = params['volume']['name']
+ size = params['volume']['size']
+ access_modes = params['access']['modes']
+ persistent_volume_claim = dict(
+ name="{0}-claim".format(volume),
+ capacity=size,
+ access_modes=access_modes)
+ return persistent_volume_claim
return None
@@ -952,22 +902,10 @@ def oo_persistent_volume_claims(hostvars, persistent_volume_claims=None):
persistent_volume_claims = []
if 'hosted' in hostvars['openshift']:
for component in hostvars['openshift']['hosted']:
- if 'storage' in hostvars['openshift']['hosted'][component]:
- params = hostvars['openshift']['hosted'][component]['storage']
- kind = params['kind']
- if 'create_pv' in params:
- if 'create_pvc' in params:
- create_pv = params['create_pv']
- create_pvc = params['create_pvc']
- if kind not in [None, 'object'] and create_pv and create_pvc:
- volume = params['volume']['name']
- size = params['volume']['size']
- access_modes = params['access']['modes']
- persistent_volume_claim = dict(
- name="{0}-claim".format(volume),
- capacity=size,
- access_modes=access_modes)
- persistent_volume_claims.append(persistent_volume_claim)
+ persistent_volume_claim = oo_component_pv_claims(hostvars, 'hosted', component)
+ if persistent_volume_claim is not None:
+ persistent_volume_claims.append(persistent_volume_claim)
+
if 'logging' in hostvars['openshift']:
persistent_volume_claim = oo_component_pv_claims(hostvars, 'logging')
if persistent_volume_claim is not None:
@@ -980,6 +918,18 @@ def oo_persistent_volume_claims(hostvars, persistent_volume_claims=None):
persistent_volume_claim = oo_component_pv_claims(hostvars, 'metrics')
if persistent_volume_claim is not None:
persistent_volume_claims.append(persistent_volume_claim)
+ if 'prometheus' in hostvars['openshift']:
+ persistent_volume_claim = oo_component_pv_claims(hostvars, 'prometheus')
+ if persistent_volume_claim is not None:
+ persistent_volume_claims.append(persistent_volume_claim)
+ if 'alertmanager' in hostvars['openshift']['prometheus']:
+ persistent_volume_claim = oo_component_pv_claims(hostvars, 'prometheus', 'alertmanager')
+ if persistent_volume_claim is not None:
+ persistent_volume_claims.append(persistent_volume_claim)
+ if 'alertbuffer' in hostvars['openshift']['prometheus']:
+ persistent_volume_claim = oo_component_pv_claims(hostvars, 'prometheus', 'alertbuffer')
+ if persistent_volume_claim is not None:
+ persistent_volume_claims.append(persistent_volume_claim)
return persistent_volume_claims
@@ -1175,6 +1125,73 @@ of items as ['region=infra', 'zone=primary']
return selectors
+def oo_filter_sa_secrets(sa_secrets, secret_hint='-token-'):
+ """Parse the Service Account Secrets list, `sa_secrets`, (as from
+oc_serviceaccount_secret:state=list) and return the name of the secret
+containing the `secret_hint` string. For example, by default this will
+return the name of the secret holding the SA bearer token.
+
+Only provide the 'results' object to this filter. This filter expects
+to receive a list like this:
+
+ [
+ {
+ "name": "management-admin-dockercfg-p31s2"
+ },
+ {
+ "name": "management-admin-token-bnqsh"
+ }
+ ]
+
+
+Returns:
+
+* `secret_name` [string] - The name of the secret matching the
+ `secret_hint` parameter. By default this is the secret holding the
+ SA's bearer token.
+
+Example playbook usage:
+
+Register the return value from oc_serviceaccount_secret and pass
+that result to this filter plugin.
+
+ - name: Get all SA Secrets
+ oc_serviceaccount_secret:
+ state: list
+ service_account: management-admin
+ namespace: management-infra
+ register: sa
+
+ - name: Save the SA bearer token secret name
+ set_fact:
+ management_token: "{{ sa.results | oo_filter_sa_secrets }}"
+
+ - name: Get the SA bearer token value
+ oc_secret:
+ state: list
+ name: "{{ management_token }}"
+ namespace: management-infra
+ decode: true
+ register: sa_secret
+
+ - name: Print the bearer token value
+ debug:
+ var: sa_secret.results.decoded.token
+
+ """
+ secret_name = None
+
+ for secret in sa_secrets:
+ # each secret is a hash
+ if secret['name'].find(secret_hint) == -1:
+ continue
+ else:
+ secret_name = secret['name']
+ break
+
+ return secret_name
+
+
class FilterModule(object):
""" Custom ansible filter mapping """
@@ -1217,5 +1234,6 @@ class FilterModule(object):
"to_padded_yaml": to_padded_yaml,
"oo_random_word": oo_random_word,
"oo_contains_rule": oo_contains_rule,
- "oo_selector_to_string_list": oo_selector_to_string_list
+ "oo_selector_to_string_list": oo_selector_to_string_list,
+ "oo_filter_sa_secrets": oo_filter_sa_secrets,
}
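
For quick reference, the new oo_filter_sa_secrets filter registered above reduces to the following minimal, self-contained Python sketch (an assumed-equivalent rewrite, not the committed code verbatim; the input reuses the hypothetical secret names from the filter's own docstring):

    # Sketch of the oo_filter_sa_secrets logic: scan a list of {'name': ...}
    # dicts and return the first secret name containing the hint substring
    # (default '-token-'), or None if no name matches.
    def filter_sa_secrets(sa_secrets, secret_hint='-token-'):
        for secret in sa_secrets:
            if secret_hint in secret['name']:
                return secret['name']
        return None

    # Hypothetical input, as in the docstring example:
    sa_results = [
        {"name": "management-admin-dockercfg-p31s2"},
        {"name": "management-admin-token-bnqsh"},
    ]

    # Prints the SA bearer-token secret name: management-admin-token-bnqsh
    print(filter_sa_secrets(sa_results))
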
diff --git a/images/installer/README_CONTAINER_IMAGE.md b/images/installer/README_CONTAINER_IMAGE.md
index bc1ebb4a8..bfe3661c0 100644
--- a/images/installer/README_CONTAINER_IMAGE.md
+++ b/images/installer/README_CONTAINER_IMAGE.md
@@ -45,4 +45,6 @@ These options may be set via the ``atomic`` ``--set`` flag. For defaults see ``r
* ANSIBLE_CONFIG - Full path for the ansible configuration file to use inside the container
-* INVENTORY_FILE - Full path for the inventory to use from the host
\ No newline at end of file
+* INVENTORY_FILE - Full path for the inventory to use from the host
+
+* INVENTORY_DIR - Full path for the inventory directory to use (e.g. for use with a hybrid dynamic/static inventory)
diff --git a/images/installer/root/usr/local/bin/run b/images/installer/root/usr/local/bin/run
index 70aa0bac3..cd38a6ff0 100755
--- a/images/installer/root/usr/local/bin/run
+++ b/images/installer/root/usr/local/bin/run
@@ -19,6 +19,9 @@ if [[ -v INVENTORY_FILE ]]; then
# Make a copy so that ALLOW_ANSIBLE_CONNECTION_LOCAL below
# does not attempt to modify the original
cp -a ${INVENTORY_FILE} ${INVENTORY}
+elif [[ -v INVENTORY_DIR ]]; then
+ INVENTORY="$(mktemp -d)"
+ cp -R ${INVENTORY_DIR}/* ${INVENTORY}
elif [[ -v INVENTORY_URL ]]; then
curl -o ${INVENTORY} ${INVENTORY_URL}
elif [[ -v DYNAMIC_SCRIPT_URL ]]; then
@@ -29,7 +32,7 @@ elif [[ -v GENERATE_INVENTORY ]]; then
/usr/local/bin/generate ${INVENTORY}
else
echo
- echo "One of INVENTORY_FILE, INVENTORY_URL, GENERATE_INVENTORY, or DYNAMIC_SCRIPT_URL must be provided."
+ echo "One of INVENTORY_FILE, INVENTORY_DIR, INVENTORY_URL, GENERATE_INVENTORY, or DYNAMIC_SCRIPT_URL must be provided."
exec /usr/local/bin/usage
fi
INVENTORY_ARG="-i ${INVENTORY}"
diff --git a/inventory/byo/hosts.ose.example b/inventory/byo/hosts.example
index 30987fa38..e49dd5fa2 100644
--- a/inventory/byo/hosts.ose.example
+++ b/inventory/byo/hosts.example
@@ -27,7 +27,8 @@ ansible_ssh_user=root
debug_level=2
# Specify the deployment type. Valid values are origin and openshift-enterprise.
-openshift_deployment_type=openshift-enterprise
+openshift_deployment_type=origin
+#openshift_deployment_type=openshift-enterprise
# Specify the generic release of OpenShift to install. This is used mainly just during installation, after which we
# rely on the version running on the first master. Works best for containerized installs where we can usually
@@ -58,6 +59,8 @@ openshift_release=v3.7
#openshift_use_etcd_system_container=False
#
# In either case, system_images_registry must be specified to be able to find the system images
+#system_images_registry="docker.io"
+# when openshift_deployment_type=='openshift-enterprise'
#system_images_registry="registry.access.redhat.com"
# Manage openshift example imagestreams and templates during install and upgrade
@@ -124,15 +127,15 @@ openshift_release=v3.7
# Default value: "--log-driver=journald"
#openshift_docker_options="-l warn --ipv6=false"
+# Specify exact version of Docker to configure or upgrade to.
+# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10.
+# docker_version="1.12.1"
+
# Specify whether to run Docker daemon with SELinux enabled in containers. Default is True.
# Uncomment below to disable; for example if your kernel does not support the
# Docker overlay/overlay2 storage drivers with SELinux enabled.
#openshift_docker_selinux_enabled=False
-# Specify exact version of Docker to configure or upgrade to.
-# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10.
-# docker_version="1.12.1"
-
# Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone.
# docker_upgrade=False
@@ -179,7 +182,7 @@ openshift_release=v3.7
#oreg_auth_credentials_replace: True
# OpenShift repository configuration
-#openshift_additional_repos=[{'id': 'ose-devel', 'name': 'ose-devel', 'baseurl': 'http://example.com/puddle/build/AtomicOpenShift/3.1/latest/RH7-RHOSE-3.0/$basearch/os', 'enabled': 1, 'gpgcheck': 0}]
+#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
#openshift_repos_enable_testing=false
# htpasswd auth
@@ -237,9 +240,9 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# CloudForms Management Engine (ManageIQ) App Install
#
# Enables installation of MIQ server. Recommended for dedicated
-# clusters only. See roles/openshift_cfme/README.md for instructions
+# clusters only. See roles/openshift_management/README.md for instructions
# and requirements.
-#openshift_cfme_install_app=False
+#openshift_management_install_management=False
# Cloud Provider Configuration
#
@@ -307,9 +310,6 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_master_cluster_hostname=openshift-ansible.test.example.com
#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
-# Override the default controller lease ttl
-#osm_controller_lease_ttl=30
-
# Configure controller arguments
#osm_controller_args={'resource-quota-sync-period': ['10s']}
@@ -346,7 +346,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# default storage plugin dependencies to install, by default the ceph and
# glusterfs plugin dependencies will be installed, if available.
-#osn_storage_plugin_deps=['ceph','glusterfs']
+#osn_storage_plugin_deps=['ceph','glusterfs','iscsi']
# OpenShift Router Options
#
@@ -461,7 +461,6 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_hosted_registry_storage_volume_size=10Gi
#
# AWS S3
-#
# S3 bucket must already exist.
#openshift_hosted_registry_storage_kind=object
#openshift_hosted_registry_storage_provider=s3
@@ -549,8 +548,11 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# `/hawkular/metrics` path will break installation of metrics.
#openshift_metrics_hawkular_hostname=hawkular-metrics.example.com
# Configure the prefix and version for the component images
-#openshift_metrics_image_prefix=registry.example.com:8888/openshift3/
-#openshift_metrics_image_version=3.7.0
+#openshift_metrics_image_prefix=docker.io/openshift/origin-
+#openshift_metrics_image_version=v3.7
+# when openshift_deployment_type=='openshift-enterprise'
+#openshift_metrics_image_prefix=registry.access.redhat.com/openshift3/
+#openshift_metrics_image_version=v3.7
#
# StorageClass
# openshift_storageclass_name=gp2
@@ -604,9 +606,83 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# this value must be 1
#openshift_logging_es_cluster_size=1
# Configure the prefix and version for the component images
-#openshift_logging_image_prefix=registry.example.com:8888/openshift3/
+#openshift_logging_image_prefix=docker.io/openshift/origin-
+#openshift_logging_image_version=v3.7.0
+# when openshift_deployment_type=='openshift-enterprise'
+#openshift_logging_image_prefix=registry.access.redhat.com/openshift3/
#openshift_logging_image_version=3.7.0
+# Prometheus deployment
+#
+# Prometheus deployment is currently disabled by default; enable it by setting this:
+#openshift_hosted_prometheus_deploy=true
+#
+# Prometheus storage config
+# Option A - NFS Host Group
+# An NFS volume will be created with path "nfs_directory/volume_name"
+# on the host within the [nfs] host group. For example, the volume
+# path using these options would be "/exports/prometheus"
+#openshift_prometheus_storage_kind=nfs
+#openshift_prometheus_storage_access_modes=['ReadWriteOnce']
+#openshift_prometheus_storage_nfs_directory=/exports
+#openshift_prometheus_storage_nfs_options='*(rw,root_squash)'
+#openshift_prometheus_storage_volume_name=prometheus
+#openshift_prometheus_storage_volume_size=10Gi
+#openshift_prometheus_storage_labels={'storage': 'prometheus'}
+#openshift_prometheus_storage_type='pvc'
+# For prometheus-alertmanager
+#openshift_prometheus_alertmanager_storage_kind=nfs
+#openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
+#openshift_prometheus_alertmanager_storage_nfs_directory=/exports
+#openshift_prometheus_alertmanager_storage_nfs_options='*(rw,root_squash)'
+#openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager
+#openshift_prometheus_alertmanager_storage_volume_size=10Gi
+#openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'}
+#openshift_prometheus_alertmanager_storage_type='pvc'
+# For prometheus-alertbuffer
+#openshift_prometheus_alertbuffer_storage_kind=nfs
+#openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
+#openshift_prometheus_alertbuffer_storage_nfs_directory=/exports
+#openshift_prometheus_alertbuffer_storage_nfs_options='*(rw,root_squash)'
+#openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer
+#openshift_prometheus_alertbuffer_storage_volume_size=10Gi
+#openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'}
+#openshift_prometheus_alertbuffer_storage_type='pvc'
+#
+# Option B - External NFS Host
+# NFS volume must already exist with path "nfs_directory/volume_name" on
+# the storage_host. For example, the remote volume path using these
+# options would be "nfs.example.com:/exports/prometheus"
+#openshift_prometheus_storage_kind=nfs
+#openshift_prometheus_storage_access_modes=['ReadWriteOnce']
+#openshift_prometheus_storage_host=nfs.example.com
+#openshift_prometheus_storage_nfs_directory=/exports
+#openshift_prometheus_storage_volume_name=prometheus
+#openshift_prometheus_storage_volume_size=10Gi
+#openshift_prometheus_storage_labels={'storage': 'prometheus'}
+#openshift_prometheus_storage_type='pvc'
+# For prometheus-alertmanager
+#openshift_prometheus_alertmanager_storage_kind=nfs
+#openshift_prometheus_alertmanager_storage_access_modes=['ReadWriteOnce']
+#openshift_prometheus_alertmanager_storage_host=nfs.example.com
+#openshift_prometheus_alertmanager_storage_nfs_directory=/exports
+#openshift_prometheus_alertmanager_storage_volume_name=prometheus-alertmanager
+#openshift_prometheus_alertmanager_storage_volume_size=10Gi
+#openshift_prometheus_alertmanager_storage_labels={'storage': 'prometheus-alertmanager'}
+#openshift_prometheus_alertmanager_storage_type='pvc'
+# For prometheus-alertbuffer
+#openshift_prometheus_alertbuffer_storage_kind=nfs
+#openshift_prometheus_alertbuffer_storage_access_modes=['ReadWriteOnce']
+#openshift_prometheus_alertbuffer_storage_host=nfs.example.com
+#openshift_prometheus_alertbuffer_storage_nfs_directory=/exports
+#openshift_prometheus_alertbuffer_storage_volume_name=prometheus-alertbuffer
+#openshift_prometheus_alertbuffer_storage_volume_size=10Gi
+#openshift_prometheus_alertbuffer_storage_labels={'storage': 'prometheus-alertbuffer'}
+#openshift_prometheus_alertbuffer_storage_type='pvc'
+#
+# Option C - none -- Prometheus, alertmanager and alertbuffer will use emptydir volumes
+# which are destroyed when pods are deleted
+
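+# For illustration only: a minimal Option A configuration might combine
+# the deploy flag with the NFS host group settings above, e.g.
+#openshift_hosted_prometheus_deploy=true
+#openshift_prometheus_storage_kind=nfs
+#openshift_prometheus_storage_volume_size=10Gi
+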
# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
@@ -662,8 +738,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_master_api_port=8443
#openshift_master_console_port=8443
-# set RPM version for debugging purposes
-#openshift_pkg_version=-3.1.0.0
+# set exact RPM version (include - prefix)
+#openshift_pkg_version=-3.6.0
+# you may also specify version and release, e.g.:
+#openshift_pkg_version=-3.7.0-0.126.0.git.0.9351aae.el7
# Configure custom ca certificate
#openshift_master_ca_certificate={'certfile': '/path/to/ca.crt', 'keyfile': '/path/to/ca.key'}
@@ -675,6 +753,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Configure custom named certificates (SNI certificates)
#
+# https://docs.openshift.org/latest/install_config/certificate_customization.html
# https://docs.openshift.com/enterprise/latest/install_config/certificate_customization.html
#
# NOTE: openshift_master_named_certificates is cached on masters and is an
@@ -739,6 +818,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# openshift_use_dnsmasq is deprecated. This must be true, or installs will fail
# in versions >= 3.6
#openshift_use_dnsmasq=False
+
# Define an additional dnsmasq.conf file to deploy to /etc/dnsmasq.d/openshift-ansible.conf
# This is useful for POC environments where DNS may not actually be available yet or to set
# options like 'strict-order' to alter dnsmasq configuration.
@@ -799,8 +879,13 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# Or you may optionally define your own build overrides configuration serialized as json
#openshift_buildoverrides_json='{"BuildOverrides":{"configuration":{"apiVersion":"v1","kind":"BuildDefaultsConfig","forcePull":"true"}}}'
-# Enable template service broker by specifying one of more namespaces whose
-# templates will be served by the broker
+# Enable service catalog
+#openshift_enable_service_catalog=true
+
+# Enable template service broker (requires service catalog to be enabled, above)
+#template_service_broker_install=true
+
+# Configure one or more namespaces whose templates will be served by the TSB
#openshift_template_service_broker_namespaces=['openshift']
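+#
+# For example, to enable the catalog and the broker together for the
+# namespaces above (an illustrative combination of these settings):
+#openshift_enable_service_catalog=true
+#template_service_broker_install=true
+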
# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
@@ -821,7 +906,7 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
#openshift_master_controllers_env_vars={"ENABLE_HTTP2": "true"}
#openshift_node_env_vars={"ENABLE_HTTP2": "true"}
-# Enable API service auditing, available as of 3.2
+# Enable API service auditing
#openshift_master_audit_config={"enabled": true}
#
# In case you want more advanced setup for the auditlog you can
@@ -830,6 +915,10 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# exist
#openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/log/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": 14, "maximumFileSizeMegabytes": 500, "maximumRetainedFiles": 5}
+# Enable origin repos that point at the CentOS PaaS SIG, defaults to true, only used
+# by deployment_type=origin
+#openshift_enable_origin_repo=false
+
# Validity of the auto-generated OpenShift certificates in days.
# See also openshift_hosted_registry_cert_expire_days above.
#
@@ -878,9 +967,85 @@ openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true',
# You may wish to disable these or make them non fatal
#
# openshift_upgrade_pre_storage_migration_enabled=true
-# openshift_upgrade_pre_storage_migration_fatal==true
+# openshift_upgrade_pre_storage_migration_fatal=true
# openshift_upgrade_post_storage_migration_enabled=true
-# openshift_upgrade_post_storage_migration_fatal==false
+# openshift_upgrade_post_storage_migration_fatal=false
+
+######################################################################
+# CloudForms/ManageIQ (CFME/MIQ) Configuration
+
+# See the readme for full descriptions and getting started
+# instructions: ../../roles/openshift_management/README.md or go directly to
+# their definitions: ../../roles/openshift_management/defaults/main.yml
+# ../../roles/openshift_management/vars/main.yml
+#
+# Namespace for the CFME project
+#openshift_management_project=openshift-management
+
+# Namespace/project description
+#openshift_management_project_description=CloudForms Management Engine
+
+# Choose 'miq-template' for a podified database install
+# Choose 'miq-template-ext-db' for an external database install
+#
+# If you are using the miq-template-ext-db template then you must add
+# the required database parameters to the
+# openshift_management_template_parameters variable.
+#openshift_management_app_template=miq-template
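+#
+# For example, an external-database install might look like the
+# following; the database parameter names here are illustrative, check
+# the template's .parameters list for the real ones:
+#openshift_management_app_template=miq-template-ext-db
+#openshift_management_template_parameters={'DATABASE_IP': '10.1.1.1', 'DATABASE_PASSWORD': 'changeme'}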
+
+# Allowed options: nfs, nfs_external, preconfigured, cloudprovider.
+#openshift_management_storage_class=nfs
+
+# [OPTIONAL] - If you are using an EXTERNAL NFS server, such as a
+# NetApp appliance, then you must set the hostname here. Leave the
+# value as 'false' if you are not using external NFS.
+#openshift_management_storage_nfs_external_hostname=false
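+#
+# For example (hostname is illustrative):
+#openshift_management_storage_nfs_external_hostname=nfs.example.com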
+
+# [OPTIONAL] - If you are using external NFS then you must set the base
+# path to the exports location here.
+#
+# Additionally: EXTERNAL NFS REQUIRES that YOU CREATE the nfs exports
+# that will back the application PV and optionally the database
+# PV. Export paths are defined relative to
+# {{ openshift_management_storage_nfs_base_dir }}
+#
+# LOCAL NFS NOTE:
+#
+# You may also change this value if you want to change the default
+# path used for local NFS exports.
+#openshift_management_storage_nfs_base_dir=/exports
+
+# LOCAL NFS NOTE:
+#
+# You may override the automatically selected LOCAL NFS server by
+# setting this variable. Useful for testing specific task files.
+#openshift_management_storage_nfs_local_hostname=false
+
+# These are the default values for the username and password of the
+# management app. Changing these values in your inventory will not
+# change your username or password. You should only need to change
+# these values in your inventory if you already changed the actual
+# name and password AND are trying to use integration scripts.
+#
+# For example, adding this cluster as a container provider,
+# playbooks/byo/openshift-management/add_container_provider.yml
+#openshift_management_username=admin
+#openshift_management_password=smartvm
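+#
+# For example, after install you could register this cluster as a
+# container provider with those credentials (the inventory path is
+# illustrative):
+#   ansible-playbook -i hosts playbooks/byo/openshift-management/add_container_provider.yml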
+
+# A hash of parameters you want to override or set in the
+# miq-template.yaml or miq-template-ext-db.yaml templates. Set this in
+# your inventory file as a simple hash. Acceptable values are defined
+# under the .parameters list in files/miq-template{-ext-db}.yaml
+# Example:
+#
+# openshift_management_template_parameters={'APPLICATION_MEM_REQ': '512Mi'}
+#openshift_management_template_parameters={}
+
+# Firewall configuration
+# You can open additional firewall ports by defining them as a list of
+# service names and ports/port ranges for either masters or nodes.
+#openshift_master_open_ports=[{"service":"svc1","port":"11/tcp"}]
+#openshift_node_open_ports=[{"service":"svc2","port":"12-13/tcp"},{"service":"svc3","port":"14/udp"}]
# host group for masters
[masters]
diff --git a/inventory/byo/hosts.origin.example b/inventory/byo/hosts.origin.example
deleted file mode 100644
index c8c60bb60..000000000
--- a/inventory/byo/hosts.origin.example
+++ /dev/null
@@ -1,899 +0,0 @@
-# This is an example of a bring your own (byo) host inventory
-
-# Create an OSEv3 group that contains the masters and nodes groups
-[OSEv3:children]
-masters
-nodes
-etcd
-lb
-nfs
-
-# Set variables common for all OSEv3 hosts
-[OSEv3:vars]
-# Enable unsupported configurations, things that will yield a partially
-# functioning cluster but would not be supported for production use
-#openshift_enable_unsupported_configurations=false
-
-# SSH user, this user should allow ssh based auth without requiring a
-# password. If using ssh key based auth, then the key should be managed by an
-# ssh agent.
-ansible_ssh_user=root
-
-# If ansible_ssh_user is not root, ansible_become must be set to true and the
-# user must be configured for passwordless sudo
-#ansible_become=yes
-
-# Debug level for all OpenShift components (Defaults to 2)
-debug_level=2
-
-# Specify the deployment type. Valid values are origin and openshift-enterprise.
-openshift_deployment_type=origin
-
-# Specify the generic release of OpenShift to install. This is used mainly just during installation, after which we
-# rely on the version running on the first master. Works best for containerized installs where we can usually
-# use this to lookup the latest exact version of the container images, which is the tag actually used to configure
-# the cluster. For RPM installations we just verify the version detected in your configured repos matches this
-# release.
-openshift_release=v3.7
-
-# Specify an exact container image tag to install or configure.
-# WARNING: This value will be used for all hosts in containerized environments, even those that have another version installed.
-# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_image_tag=v3.7.0
-
-# Specify an exact rpm version to install or configure.
-# WARNING: This value will be used for all hosts in RPM based environments, even those that have another version installed.
-# This could potentially trigger an upgrade and downtime, so be careful with modifying this value after the cluster is set up.
-#openshift_pkg_version=-3.7.0
-
-# This enables all the system containers except for docker:
-#openshift_use_system_containers=False
-#
-# But you can choose separately each component that must be a
-# system container:
-#
-#openshift_use_openvswitch_system_container=False
-#openshift_use_node_system_container=False
-#openshift_use_master_system_container=False
-#openshift_use_etcd_system_container=False
-#
-# In either case, system_images_registry must be specified to be able to find the system images
-#system_images_registry="docker.io"
-
-# Install the openshift examples
-#openshift_install_examples=true
-
-# Configure logoutURL in the master config for console customization
-# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#changing-the-logout-url
-#openshift_master_logout_url=http://example.com
-
-# Configure extensionScripts in the master config for console customization
-# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets
-#openshift_master_extension_scripts=['/path/to/script1.js','/path/to/script2.js']
-
-# Configure extensionStylesheets in the master config for console customization
-# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#loading-custom-scripts-and-stylesheets
-#openshift_master_extension_stylesheets=['/path/to/stylesheet1.css','/path/to/stylesheet2.css']
-
-# Configure extensions in the master config for console customization
-# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
-#openshift_master_extensions=[{'name': 'images', 'sourceDirectory': '/path/to/my_images'}]
-
-# Configure extensions in the master config for console customization
-# See: https://docs.openshift.org/latest/install_config/web_console_customization.html#serving-static-files
-#openshift_master_oauth_template=/path/to/login-template.html
-
-# Configure imagePolicyConfig in the master config
-# See: https://godoc.org/github.com/openshift/origin/pkg/cmd/server/api#ImagePolicyConfig
-#openshift_master_image_policy_config={"maxImagesBulkImportedPerRepository": 3, "disableScheduledImport": true}
-
-# Configure master API rate limits for external clients
-#openshift_master_external_ratelimit_qps=200
-#openshift_master_external_ratelimit_burst=400
-# Configure master API rate limits for loopback clients
-#openshift_master_loopback_ratelimit_qps=300
-#openshift_master_loopback_ratelimit_burst=600
-
-# Docker Configuration
-# Add additional, insecure, and blocked registries to global docker configuration
-# For enterprise deployment types we ensure that registry.access.redhat.com is
-# included if you do not include it
-#openshift_docker_additional_registries=registry.example.com
-#openshift_docker_insecure_registries=registry.example.com
-#openshift_docker_blocked_registries=registry.hacker.com
-# Disable pushing to dockerhub
-#openshift_docker_disable_push_dockerhub=True
-# Use Docker inside a System Container. Note that this is a tech preview and should
-# not be used to upgrade!
-# The following options for docker are ignored:
-# - docker_version
-# - docker_upgrade
-# The following options must not be used
-# - openshift_docker_options
-#openshift_docker_use_system_container=False
-# Instead of using docker, replace it with cri-o
-# NOTE: This uses openshift_docker_systemcontainer_image_registry_override as its override
-# just as container-engine does.
-#openshift_use_crio=False
-# Force the registry to use for the docker/crio system container. By default the registry
-# will be built off of the deployment type and ansible_distribution. Only
-# use this option if you are sure you know what you are doing!
-#openshift_docker_systemcontainer_image_override="registry.example.com/container-engine:latest"
-#openshift_crio_systemcontainer_image_override="registry.example.com/cri-o:latest"
-# Items added, as is, to end of /etc/sysconfig/docker OPTIONS
-# Default value: "--log-driver=journald"
-#openshift_docker_options="-l warn --ipv6=false"
-
-# Specify exact version of Docker to configure or upgrade to.
-# Downgrades are not supported and will error out. Be careful when upgrading docker from < 1.10 to > 1.10.
-# docker_version="1.12.1"
-
-# Specify whether to run Docker daemon with SELinux enabled in containers. Default is True.
-# Uncomment below to disable; for example if your kernel does not support the
-# Docker overlay/overlay2 storage drivers with SELinux enabled.
-#openshift_docker_selinux_enabled=False
-
-# Skip upgrading Docker during an OpenShift upgrade, leaves the current Docker version alone.
-# docker_upgrade=False
-
-# Specify exact version of etcd to configure or upgrade to.
-# etcd_version="3.1.0"
-# Enable etcd debug logging, defaults to false
-# etcd_debug=true
-# Set etcd log levels by package
-# etcd_log_package_levels="etcdserver=WARNING,security=DEBUG"
-
-# Upgrade Hooks
-#
-# Hooks are available to run custom tasks at various points during a cluster
-# upgrade. Each hook should point to a file with Ansible tasks defined. We suggest using
-# absolute paths; if not, the path will be treated as relative to the file where the
-# hook is actually used.
-#
-# Tasks to run before each master is upgraded.
-# openshift_master_upgrade_pre_hook=/usr/share/custom/pre_master.yml
-#
-# Tasks to run to upgrade the master. These tasks run after the main openshift-ansible
-# upgrade steps, but before we restart system/services.
-# openshift_master_upgrade_hook=/usr/share/custom/master.yml
-#
-# Tasks to run after each master is upgraded and system/services have been restarted.
-# openshift_master_upgrade_post_hook=/usr/share/custom/post_master.yml
-
-
-# Alternate image format string, useful if you've got your own registry mirror
-# Configure this setting just on node or master
-#oreg_url_master=example.com/openshift3/ose-${component}:${version}
-#oreg_url_node=example.com/openshift3/ose-${component}:${version}
-# For setting the configuration globally
-#oreg_url=example.com/openshift3/ose-${component}:${version}
-# If oreg_url points to a registry other than registry.access.redhat.com we can
-# modify image streams to point at that registry by setting the following to true
-#openshift_examples_modify_imagestreams=true
-
-# OpenShift repository configuration
-#openshift_additional_repos=[{'id': 'openshift-origin-copr', 'name': 'OpenShift Origin COPR', 'baseurl': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/epel-7-$basearch/', 'enabled': 1, 'gpgcheck': 1, 'gpgkey': 'https://copr-be.cloud.fedoraproject.org/results/maxamillion/origin-next/pubkey.gpg'}]
-#openshift_repos_enable_testing=false
-
-# htpasswd auth
-openshift_master_identity_providers=[{'name': 'htpasswd_auth', 'login': 'true', 'challenge': 'true', 'kind': 'HTPasswdPasswordIdentityProvider', 'filename': '/etc/origin/master/htpasswd'}]
-# Defining htpasswd users
-#openshift_master_htpasswd_users={'user1': '<pre-hashed password>', 'user2': '<pre-hashed password>'}
-# or
-#openshift_master_htpasswd_file=<path to local pre-generated htpasswd file>
-
-# Allow all auth
-#openshift_master_identity_providers=[{'name': 'allow_all', 'login': 'true', 'challenge': 'true', 'kind': 'AllowAllPasswordIdentityProvider'}]
-
-# LDAP auth
-#openshift_master_identity_providers=[{'name': 'my_ldap_provider', 'challenge': 'true', 'login': 'true', 'kind': 'LDAPPasswordIdentityProvider', 'attributes': {'id': ['dn'], 'email': ['mail'], 'name': ['cn'], 'preferredUsername': ['uid']}, 'bindDN': '', 'bindPassword': '', 'ca': 'my-ldap-ca.crt', 'insecure': 'false', 'url': 'ldap://ldap.example.com:389/ou=users,dc=example,dc=com?uid'}]
-#
-# Configure LDAP CA certificate
-# Specify either the ASCII contents of the certificate or the path to
-# the local file that will be copied to the remote host. CA
-# certificate contents will be copied to master systems and saved
-# within /etc/origin/master/ with a filename matching the "ca" key set
-# within the LDAPPasswordIdentityProvider.
-#
-#openshift_master_ldap_ca=<ca text>
-# or
-#openshift_master_ldap_ca_file=<path to local ca file to use>
-
-# OpenID auth
-#openshift_master_identity_providers=[{"name": "openid_auth", "login": "true", "challenge": "false", "kind": "OpenIDIdentityProvider", "client_id": "my_client_id", "client_secret": "my_client_secret", "claims": {"id": ["sub"], "preferredUsername": ["preferred_username"], "name": ["name"], "email": ["email"]}, "urls": {"authorize": "https://myidp.example.com/oauth2/authorize", "token": "https://myidp.example.com/oauth2/token"}, "ca": "my-openid-ca-bundle.crt"}]
-#
-# Configure OpenID CA certificate
-# Specify either the ASCII contents of the certificate or the path to
-# the local file that will be copied to the remote host. CA
-# certificate contents will be copied to master systems and saved
-# within /etc/origin/master/ with a filename matching the "ca" key set
-# within the OpenIDIdentityProvider.
-#
-#openshift_master_openid_ca=<ca text>
-# or
-#openshift_master_openid_ca_file=<path to local ca file to use>
-
-# Request header auth
-#openshift_master_identity_providers=[{"name": "my_request_header_provider", "challenge": "true", "login": "true", "kind": "RequestHeaderIdentityProvider", "challengeURL": "https://www.example.com/challenging-proxy/oauth/authorize?${query}", "loginURL": "https://www.example.com/login-proxy/oauth/authorize?${query}", "clientCA": "my-request-header-ca.crt", "clientCommonNames": ["my-auth-proxy"], "headers": ["X-Remote-User", "SSO-User"], "emailHeaders": ["X-Remote-User-Email"], "nameHeaders": ["X-Remote-User-Display-Name"], "preferredUsernameHeaders": ["X-Remote-User-Login"]}]
-#
-# Configure request header CA certificate
-# Specify either the ASCII contents of the certificate or the path to
-# the local file that will be copied to the remote host. CA
-# certificate contents will be copied to master systems and saved
-# within /etc/origin/master/ with a filename matching the "clientCA"
-# key set within the RequestHeaderIdentityProvider.
-#
-#openshift_master_request_header_ca=<ca text>
-# or
-#openshift_master_request_header_ca_file=<path to local ca file to use>
-
-# CloudForms Management Engine (ManageIQ) App Install
-#
-# Enables installation of MIQ server. Recommended for dedicated
-# clusters only. See roles/openshift_cfme/README.md for instructions
-# and requirements.
-#openshift_cfme_install_app=False
-
-# Cloud Provider Configuration
-#
-# Note: You may make use of environment variables rather than store
-# sensitive configuration within the ansible inventory.
-# For example:
-#openshift_cloudprovider_aws_access_key="{{ lookup('env','AWS_ACCESS_KEY_ID') }}"
-#openshift_cloudprovider_aws_secret_key="{{ lookup('env','AWS_SECRET_ACCESS_KEY') }}"
-#
-# AWS
-#openshift_cloudprovider_kind=aws
-# Note: IAM profiles may be used instead of storing API credentials on disk.
-#openshift_cloudprovider_aws_access_key=aws_access_key_id
-#openshift_cloudprovider_aws_secret_key=aws_secret_access_key
-#
-# Openstack
-#openshift_cloudprovider_kind=openstack
-#openshift_cloudprovider_openstack_auth_url=http://openstack.example.com:35357/v2.0/
-#openshift_cloudprovider_openstack_username=username
-#openshift_cloudprovider_openstack_password=password
-#openshift_cloudprovider_openstack_domain_id=domain_id
-#openshift_cloudprovider_openstack_domain_name=domain_name
-#openshift_cloudprovider_openstack_tenant_id=tenant_id
-#openshift_cloudprovider_openstack_tenant_name=tenant_name
-#openshift_cloudprovider_openstack_region=region
-#openshift_cloudprovider_openstack_lb_subnet_id=subnet_id
-#
-# GCE
-#openshift_cloudprovider_kind=gce
-
-# Project Configuration
-#osm_project_request_message=''
-#osm_project_request_template=''
-#osm_mcs_allocator_range='s0:/2'
-#osm_mcs_labels_per_project=5
-#osm_uid_allocator_range='1000000000-1999999999/10000'
-
-# Configure additional projects
-#openshift_additional_projects={'my-project': {'default_node_selector': 'label=value'}}
-
-# Enable cockpit
-#osm_use_cockpit=true
-#
-# Set cockpit plugins
-#osm_cockpit_plugins=['cockpit-kubernetes']
-
-# Native high availability cluster method with optional load balancer.
-# If no lb group is defined, the installer assumes that a load balancer has
-# been preconfigured. For installation the value of
-# openshift_master_cluster_hostname must resolve to the load balancer
-# or to one or all of the masters defined in the inventory if no load
-# balancer is present.
-#openshift_master_cluster_method=native
-#openshift_master_cluster_hostname=openshift-ansible.test.example.com
-#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
-
-# Pacemaker high availability cluster method.
-# Pacemaker HA environment must be able to self provision the
-# configured VIP. For installation openshift_master_cluster_hostname
-# must resolve to the configured VIP.
-#openshift_master_cluster_method=pacemaker
-#openshift_master_cluster_password=openshift_cluster
-#openshift_master_cluster_vip=192.168.133.25
-#openshift_master_cluster_public_vip=192.168.133.25
-#openshift_master_cluster_hostname=openshift-ansible.test.example.com
-#openshift_master_cluster_public_hostname=openshift-ansible.test.example.com
-
-# Override the default controller lease ttl
-#osm_controller_lease_ttl=30
-
-# Configure controller arguments
-#osm_controller_args={'resource-quota-sync-period': ['10s']}
-
-# Configure api server arguments
-#osm_api_server_args={'max-requests-inflight': ['400']}
-
-# default subdomain to use for exposed routes
-#openshift_master_default_subdomain=apps.test.example.com
-
-# additional cors origins
-#osm_custom_cors_origins=['foo.example.com', 'bar.example.com']
-
-# default project node selector
-#osm_default_node_selector='region=primary'
-
-# Override the default pod eviction timeout
-#openshift_master_pod_eviction_timeout=5m
-
-# Override the default oauth tokenConfig settings:
-# openshift_master_access_token_max_seconds=86400
-# openshift_master_auth_token_max_seconds=500
-
-# Override master servingInfo.maxRequestsInFlight
-#openshift_master_max_requests_inflight=500
-
-# Override master and node servingInfo.minTLSVersion and .cipherSuites
-# valid TLS versions are VersionTLS10, VersionTLS11, VersionTLS12
-# example cipher suites override, valid cipher suites are https://golang.org/pkg/crypto/tls/#pkg-constants
-#openshift_master_min_tls_version=VersionTLS12
-#openshift_master_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...']
-#
-#openshift_node_min_tls_version=VersionTLS12
-#openshift_node_cipher_suites=['TLS_ECDHE_RSA_WITH_AES_128_GCM_SHA256', '...']
-
-# default storage plugin dependencies to install, by default the ceph and
-# glusterfs plugin dependencies will be installed, if available.
-#osn_storage_plugin_deps=['ceph','glusterfs','iscsi']
-
-# OpenShift Router Options
-#
-# An OpenShift router will be created during install if there are
-# nodes present with labels matching the default router selector,
-# "region=infra". Set openshift_node_labels per node as needed in
-# order to label nodes.
-#
-# Example:
-# [nodes]
-# node.example.com openshift_node_labels="{'region': 'infra'}"
-#
-# Router selector (optional)
-# Router will only be created if nodes matching this label are present.
-# Default value: 'region=infra'
-#openshift_hosted_router_selector='region=infra'
-#
-# Router replicas (optional)
-# Unless specified, openshift-ansible will calculate the replica count
-# based on the number of nodes matching the openshift router selector.
-#openshift_hosted_router_replicas=2
-#
-# Router force subdomain (optional)
-# A router path format to force on all routes used by this router
-# (will ignore the route host value)
-#openshift_hosted_router_force_subdomain='${name}-${namespace}.apps.example.com'
-#
-# Router certificate (optional)
-# Provide local certificate paths which will be configured as the
-# router's default certificate.
-#openshift_hosted_router_certificate={"certfile": "/path/to/router.crt", "keyfile": "/path/to/router.key", "cafile": "/path/to/router-ca.crt"}
-#
-# Manage the OpenShift Router
-#openshift_hosted_manage_router=true
-#
-# Router sharding support has been added and can be achieved by supplying the correct
-# data to the inventory. The variable to house the data is openshift_hosted_routers
-# and is in the form of a list. If no data is passed then a default router will be
-# created. There are multiple combinations of router sharding. The one described
-# below supports routers on separate nodes.
-#
-#openshift_hosted_routers=[{'name': 'router1', 'certificate': {'certfile': '/path/to/certificate/abc.crt', 'keyfile': '/path/to/certificate/abc.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router1', 'ports': ['80:80', '443:443']}, {'name': 'router2', 'certificate': {'certfile': '/path/to/certificate/xyz.crt', 'keyfile': '/path/to/certificate/xyz.key', 'cafile': '/path/to/certificate/ca.crt'}, 'replicas': 1, 'serviceaccount': 'router', 'namespace': 'default', 'stats_port': 1936, 'edits': [{'action': 'append', 'key': 'spec.template.spec.containers[0].env', 'value': {'name': 'ROUTE_LABELS', 'value': 'route=external'}}], 'images': 'openshift3/ose-${component}:${version}', 'selector': 'type=router2', 'ports': ['80:80', '443:443']}]
-
-# OpenShift Registry Console Options
-# Override the console image prefix for enterprise deployments, not used in origin
-# default is "registry.access.redhat.com/openshift3/" and the image appended is "registry-console"
-#openshift_cockpit_deployer_prefix=registry.example.com/myrepo/
-# Override image version, defaults to latest for origin, matches the product version for enterprise
-#openshift_cockpit_deployer_version=1.4.1
-
-# Openshift Registry Options
-#
-# An OpenShift registry will be created during install if there are
-# nodes present with labels matching the default registry selector,
-# "region=infra". Set openshift_node_labels per node as needed in
-# order to label nodes.
-#
-# Example:
-# [nodes]
-# node.example.com openshift_node_labels="{'region': 'infra'}"
-#
-# Registry selector (optional)
-# Registry will only be created if nodes matching this label are present.
-# Default value: 'region=infra'
-#openshift_hosted_registry_selector='region=infra'
-#
-# Registry replicas (optional)
-# Unless specified, openshift-ansible will calculate the replica count
-# based on the number of nodes matching the openshift registry selector.
-#openshift_hosted_registry_replicas=2
-#
-# Validity of the auto-generated certificate in days (optional)
-#openshift_hosted_registry_cert_expire_days=730
-#
-# Manage the OpenShift Registry
-#openshift_hosted_manage_registry=true
-
-# Registry Storage Options
-#
-# NFS Host Group
-# An NFS volume will be created with path "nfs_directory/volume_name"
-# on the host within the [nfs] host group. For example, the volume
-# path using these options would be "/exports/registry"
-#openshift_hosted_registry_storage_kind=nfs
-#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
-# nfs_directory must conform to DNS-1123 subdomain: it must consist of lower case
-# alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character
-#openshift_hosted_registry_storage_nfs_directory=/exports
-#openshift_hosted_registry_storage_nfs_options='*(rw,root_squash)'
-#openshift_hosted_registry_storage_volume_name=registry
-#openshift_hosted_registry_storage_volume_size=10Gi
-#
-# External NFS Host
-# NFS volume must already exist with path "nfs_directory/volume_name" on
-# the storage_host. For example, the remote volume path using these
-# options would be "nfs.example.com:/exports/registry"
-#openshift_hosted_registry_storage_kind=nfs
-#openshift_hosted_registry_storage_access_modes=['ReadWriteMany']
-#openshift_hosted_registry_storage_host=nfs.example.com
-# nfs_directory must conform to DNS-1123 subdomain: it must consist of lower case
-# alphanumeric characters, '-' or '.', and must start and end with an alphanumeric character
-#openshift_hosted_registry_storage_nfs_directory=/exports
-#openshift_hosted_registry_storage_volume_name=registry
-#openshift_hosted_registry_storage_volume_size=10Gi
-#
-# Openstack
-# Volume must already exist.
-#openshift_hosted_registry_storage_kind=openstack
-#openshift_hosted_registry_storage_access_modes=['ReadWriteOnce']
-#openshift_hosted_registry_storage_openstack_filesystem=ext4
-#openshift_hosted_registry_storage_openstack_volumeID=3a650b4f-c8c5-4e0a-8ca5-eaee11f16c57
-#openshift_hosted_registry_storage_volume_size=10Gi
-#
-# AWS S3
-# S3 bucket must already exist.
-#openshift_hosted_registry_storage_kind=object
-#openshift_hosted_registry_storage_provider=s3
-#openshift_hosted_registry_storage_s3_encrypt=false
-#openshift_hosted_registry_storage_s3_kmskeyid=aws_kms_key_id
-#openshift_hosted_registry_storage_s3_accesskey=aws_access_key_id
-#openshift_hosted_registry_storage_s3_secretkey=aws_secret_access_key
-#openshift_hosted_registry_storage_s3_bucket=bucket_name
-#openshift_hosted_registry_storage_s3_region=bucket_region
-#openshift_hosted_registry_storage_s3_chunksize=26214400
-#openshift_hosted_registry_storage_s3_rootdirectory=/registry
-#openshift_hosted_registry_pullthrough=true
-#openshift_hosted_registry_acceptschema2=true
-#openshift_hosted_registry_enforcequota=true
-#
-# Any S3 service (Minio, ExoScale, ...): Basically the same as above
-# but with regionendpoint configured
-# S3 bucket must already exist.
-#openshift_hosted_registry_storage_kind=object
-#openshift_hosted_registry_storage_provider=s3
-#openshift_hosted_registry_storage_s3_accesskey=access_key_id
-#openshift_hosted_registry_storage_s3_secretkey=secret_access_key
-#openshift_hosted_registry_storage_s3_regionendpoint=https://myendpoint.example.com/
-#openshift_hosted_registry_storage_s3_bucket=bucket_name
-#openshift_hosted_registry_storage_s3_region=bucket_region
-#openshift_hosted_registry_storage_s3_chunksize=26214400
-#openshift_hosted_registry_storage_s3_rootdirectory=/registry
-#openshift_hosted_registry_pullthrough=true
-#openshift_hosted_registry_acceptschema2=true
-#openshift_hosted_registry_enforcequota=true
-#
-# Additional CloudFront Options. When using CloudFront all three
-# of the following variables must be defined.
-#openshift_hosted_registry_storage_s3_cloudfront_baseurl=https://myendpoint.cloudfront.net/
-#openshift_hosted_registry_storage_s3_cloudfront_privatekeyfile=/full/path/to/secret.pem
-#openshift_hosted_registry_storage_s3_cloudfront_keypairid=yourpairid
-
-# Metrics deployment
-# See: https://docs.openshift.com/enterprise/latest/install_config/cluster_metrics.html
-#
-# By default metrics are not automatically deployed, set this to enable them
-#openshift_metrics_install_metrics=true
-#
-# Storage Options
-# If openshift_metrics_storage_kind is unset then metrics will be stored
-# in an EmptyDir volume and will be deleted when the cassandra pod terminates.
-# Storage options A & B currently support only one cassandra pod which is
-# generally enough for up to 1000 pods. Additional volumes can be created
-# manually after the fact and metrics scaled per the docs.
-#
-# Option A - NFS Host Group
-# An NFS volume will be created with path "nfs_directory/volume_name"
-# on the host within the [nfs] host group. For example, the volume
-# path using these options would be "/exports/metrics"
-#openshift_metrics_storage_kind=nfs
-#openshift_metrics_storage_access_modes=['ReadWriteOnce']
-#openshift_metrics_storage_nfs_directory=/exports
-#openshift_metrics_storage_nfs_options='*(rw,root_squash)'
-#openshift_metrics_storage_volume_name=metrics
-#openshift_metrics_storage_volume_size=10Gi
-#openshift_metrics_storage_labels={'storage': 'metrics'}
-#
-# Option B - External NFS Host
-# NFS volume must already exist with path "nfs_directory/volume_name" on
-# the storage_host. For example, the remote volume path using these
-# options would be "nfs.example.com:/exports/metrics"
-#openshift_metrics_storage_kind=nfs
-#openshift_metrics_storage_access_modes=['ReadWriteOnce']
-#openshift_metrics_storage_host=nfs.example.com
-#openshift_metrics_storage_nfs_directory=/exports
-#openshift_metrics_storage_volume_name=metrics
-#openshift_metrics_storage_volume_size=10Gi
-#openshift_metrics_storage_labels={'storage': 'metrics'}
-#
-# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
-# your cloud platform use this.
-#openshift_metrics_storage_kind=dynamic
-#
-# Other Metrics Options -- Common items you may wish to reconfigure, for the complete
-# list of options please see roles/openshift_metrics/README.md
-#
-# Override metricsPublicURL in the master config for cluster metrics
-# Defaults to https://hawkular-metrics.{{openshift_master_default_subdomain}}/hawkular/metrics
-# Currently, you may only alter the hostname portion of the url, altering the
-# `/hawkular/metrics` path will break installation of metrics.
-#openshift_metrics_hawkular_hostname=hawkular-metrics.example.com
-# Configure the prefix and version for the component images
-#openshift_metrics_image_prefix=docker.io/openshift/origin-
-#openshift_metrics_image_version=v3.7.0
-#
-# StorageClass
-# openshift_storageclass_name=gp2
-# openshift_storageclass_parameters={'type': 'gp2', 'encrypted': 'false'}
-#
-
-# Logging deployment
-#
-# Logging deployment is currently disabled by default; enable it by setting this
-#openshift_logging_install_logging=true
-#
-# Logging storage config
-# Option A - NFS Host Group
-# An NFS volume will be created with path "nfs_directory/volume_name"
-# on the host within the [nfs] host group. For example, the volume
-# path using these options would be "/exports/logging"
-#openshift_logging_storage_kind=nfs
-#openshift_logging_storage_access_modes=['ReadWriteOnce']
-#openshift_logging_storage_nfs_directory=/exports
-#openshift_logging_storage_nfs_options='*(rw,root_squash)'
-#openshift_logging_storage_volume_name=logging
-#openshift_logging_storage_volume_size=10Gi
-#openshift_logging_storage_labels={'storage': 'logging'}
-#
-# Option B - External NFS Host
-# NFS volume must already exist with path "nfs_directory/volume_name" on
-# the storage_host. For example, the remote volume path using these
-# options would be "nfs.example.com:/exports/logging"
-#openshift_logging_storage_kind=nfs
-#openshift_logging_storage_access_modes=['ReadWriteOnce']
-#openshift_logging_storage_host=nfs.example.com
-#openshift_logging_storage_nfs_directory=/exports
-#openshift_logging_storage_volume_name=logging
-#openshift_logging_storage_volume_size=10Gi
-#openshift_logging_storage_labels={'storage': 'logging'}
-#
-# Option C - Dynamic -- If openshift supports dynamic volume provisioning for
-# your cloud platform use this.
-#openshift_logging_storage_kind=dynamic
-#
-# Option D - none -- Logging will use emptydir volumes which are destroyed when
-# pods are deleted
-#
-# Other Logging Options -- Common items you may wish to reconfigure, for the complete
-# list of options please see roles/openshift_logging/README.md
-#
-# Configure loggingPublicURL in the master config for aggregate logging, defaults
-# to kibana.{{ openshift_master_default_subdomain }}
-#openshift_logging_kibana_hostname=logging.apps.example.com
-# Configure the number of elastic search nodes, unless you're using dynamic provisioning
-# this value must be 1
-#openshift_logging_es_cluster_size=1
-# Configure the prefix and version for the component images
-#openshift_logging_image_prefix=docker.io/openshift/origin-
-#openshift_logging_image_version=v3.7.0
-
-# Configure the multi-tenant SDN plugin (default is 'redhat/openshift-ovs-subnet')
-# os_sdn_network_plugin_name='redhat/openshift-ovs-multitenant'
-
-# Disable the OpenShift SDN plugin
-# openshift_use_openshift_sdn=False
-
-# Configure SDN cluster network and kubernetes service CIDR blocks. These
-# network blocks should be private and should not conflict with network blocks
-# in your infrastructure that pods may require access to. Can not be changed
-# after deployment.
-#
-# WARNING : Do not pick subnets that overlap with the default Docker bridge subnet of
-# 172.17.0.0/16. Your installation will fail and/or your configuration change will
-# cause the Pod SDN or Cluster SDN to fail.
-#
-# WORKAROUND : If you must use an overlapping subnet, you can configure a non conflicting
-# docker0 CIDR range by adding '--bip=192.168.2.1/24' to DOCKER_NETWORK_OPTIONS
-# environment variable located in /etc/sysconfig/docker-network.
-# When upgrading or scaling up the following must match what's in your master config!
-# Inventory: master yaml field
-# osm_cluster_network_cidr: clusterNetworkCIDR
-# openshift_portal_net: serviceNetworkCIDR
-# When installing osm_cluster_network_cidr and openshift_portal_net must be set.
-# Sane examples are provided below.
-#osm_cluster_network_cidr=10.128.0.0/14
-#openshift_portal_net=172.30.0.0/16
-
-# ExternalIPNetworkCIDRs controls what values are acceptable for the
-# service external IP field. If empty, no externalIP may be set. It
-# may contain a list of CIDRs which are checked for access. If a CIDR
-# is prefixed with !, IPs in that CIDR will be rejected. Rejections
-# will be applied first, then the IP checked against one of the
-# allowed CIDRs. You should ensure this range does not overlap with
-# your nodes, pods, or service CIDRs for security reasons.
-#openshift_master_external_ip_network_cidrs=['0.0.0.0/0']
-
-# IngressIPNetworkCIDR controls the range to assign ingress IPs from for
-# services of type LoadBalancer on bare metal. If empty, ingress IPs will not
-# be assigned. It may contain a single CIDR that will be allocated from. For
-# security reasons, you should ensure that this range does not overlap with
-# the CIDRs reserved for external IPs, nodes, pods, or services.
-#openshift_master_ingress_ip_network_cidr=172.46.0.0/16
-
-# Configure number of bits to allocate to each host's subnet e.g. 9
-# would mean a /23 network on the host.
-# When upgrading or scaling up the following must match what's in your master config!
-# Inventory: master yaml field
-# osm_host_subnet_length: hostSubnetLength
-# When installing osm_host_subnet_length must be set. A sane example is provided below.
-#osm_host_subnet_length=9
-
-# Configure master API and console ports.
-#openshift_master_api_port=8443
-#openshift_master_console_port=8443
-
-# set RPM version for debugging purposes
-#openshift_pkg_version=-1.1
-
-# Configure custom ca certificate
-#openshift_master_ca_certificate={'certfile': '/path/to/ca.crt', 'keyfile': '/path/to/ca.key'}
-#
-# NOTE: CA certificate will not be replaced with existing clusters.
-# This option may only be specified when creating a new cluster or
-# when redeploying cluster certificates with the redeploy-certificates
-# playbook.
-
-# Configure custom named certificates (SNI certificates)
-#
-# https://docs.openshift.org/latest/install_config/certificate_customization.html
-#
-# NOTE: openshift_master_named_certificates is cached on masters and is an
-# additive fact, meaning that each run with a different set of certificates
-# will add the newly provided certificates to the cached set of certificates.
-#
-# An optional CA may be specified for each named certificate. CAs will
-# be added to the OpenShift CA bundle which allows for the named
-# certificate to be served for internal cluster communication.
-#
-# If you would like openshift_master_named_certificates to be overwritten with
-# the provided value, specify openshift_master_overwrite_named_certificates.
-#openshift_master_overwrite_named_certificates=true
-#
-# Provide local certificate paths which will be deployed to masters
-#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "cafile": "/path/to/custom-ca1.crt"}]
-#
-# Detected names may be overridden by specifying the "names" key
-#openshift_master_named_certificates=[{"certfile": "/path/to/custom1.crt", "keyfile": "/path/to/custom1.key", "names": ["public-master-host.com"], "cafile": "/path/to/custom-ca1.crt"}]
-
-# Session options
-#openshift_master_session_name=ssn
-#openshift_master_session_max_seconds=3600
-
-# An authentication and encryption secret will be generated if secrets
-# are not provided. If provided, openshift_master_session_auth_secrets
-# and openshift_master_encryption_secrets must be equal length.
-#
-# Signing secrets, used to authenticate sessions using
-# HMAC. Recommended to use secrets with 32 or 64 bytes.
-#openshift_master_session_auth_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
-#
-# Encrypting secrets, used to encrypt sessions. Must be 16, 24, or 32
-# characters long, to select AES-128, AES-192, or AES-256.
-#openshift_master_session_encryption_secrets=['DONT+USE+THIS+SECRET+b4NV+pmZNSO']
-
-# configure how often node iptables rules are refreshed
-#openshift_node_iptables_sync_period=5s
-
-# Configure nodeIP in the node config
-# This is needed in cases where node traffic is desired to go over an
-# interface other than the default network interface.
-#openshift_set_node_ip=True
-
-# Configure dnsIP in the node config
-#openshift_dns_ip=172.30.0.1
-
-# Configure node kubelet arguments. pods-per-core is valid in OpenShift Origin 1.3 or OpenShift Container Platform 3.3 and later.
-#openshift_node_kubelet_args={'pods-per-core': ['10'], 'max-pods': ['250'], 'image-gc-high-threshold': ['85'], 'image-gc-low-threshold': ['80']}
-
-# Configure logrotate scripts
-# See: https://github.com/nickhammond/ansible-logrotate
-#logrotate_scripts=[{"name": "syslog", "path": "/var/log/cron\n/var/log/maillog\n/var/log/messages\n/var/log/secure\n/var/log/spooler\n", "options": ["daily", "rotate 7", "compress", "sharedscripts", "missingok"], "scripts": {"postrotate": "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"}}]
-
-# openshift-ansible will wait indefinitely for your input when it detects that the
-# value of openshift_hostname resolves to an IP address not bound to any local
-# interfaces. This mis-configuration is problematic for any pod leveraging host
-# networking and liveness or readiness probes.
-# Setting this variable to true will override that check.
-#openshift_override_hostname_check=true
-
-# openshift_use_dnsmasq is deprecated. This must be true, or installs will fail
-# in versions >= 3.6
-#openshift_use_dnsmasq=False
-
-# Define an additional dnsmasq.conf file to deploy to /etc/dnsmasq.d/openshift-ansible.conf
-# This is useful for POC environments where DNS may not actually be available yet or to set
-# options like 'strict-order' to alter dnsmasq configuration.
-#openshift_node_dnsmasq_additional_config_file=/home/bob/ose-dnsmasq.conf
-
-# Global Proxy Configuration
-# These options configure HTTP_PROXY, HTTPS_PROXY, and NOPROXY environment
-# variables for docker and master services.
-#
-# Hosts in the openshift_no_proxy list will NOT use any globally
-# configured HTTP(S)_PROXYs. openshift_no_proxy accepts domains
-# (.example.com), and hosts (example.com), and IP addresses.
-#openshift_http_proxy=http://USER:PASSWORD@IPADDR:PORT
-#openshift_https_proxy=https://USER:PASSWORD@IPADDR:PORT
-#openshift_no_proxy='.hosts.example.com,some-host.com'
-#
-# Most environments don't require a proxy between openshift masters, nodes, and
-# etcd hosts. So automatically add those hostnames to the openshift_no_proxy list.
-# If all of your hosts share a common domain you may wish to disable this and
-# specify that domain above instead.
-#
-# For example, having hosts with FQDNs: m1.ex.com, n1.ex.com, and
-# n2.ex.com, one would simply add '.ex.com' to the openshift_no_proxy
-# variable (above) and set this value to False
-#openshift_generate_no_proxy_hosts=True
-#
-# These options configure the BuildDefaults admission controller which injects
-# configuration into Builds. Proxy related values will default to the global proxy
-# config values. You only need to set these if they differ from the global proxy settings.
-# See BuildDefaults documentation at
-# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
-#openshift_builddefaults_http_proxy=http://USER:PASSWORD@HOST:PORT
-#openshift_builddefaults_https_proxy=https://USER:PASSWORD@HOST:PORT
-#openshift_builddefaults_no_proxy=mycorp.com
-#openshift_builddefaults_git_http_proxy=http://USER:PASSWORD@HOST:PORT
-#openshift_builddefaults_git_https_proxy=https://USER:PASSWORD@HOST:PORT
-#openshift_builddefaults_git_no_proxy=mycorp.com
-#openshift_builddefaults_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}]
-#openshift_builddefaults_nodeselectors={'nodelabel1':'nodelabelvalue1'}
-#openshift_builddefaults_annotations={'annotationkey1':'annotationvalue1'}
-#openshift_builddefaults_resources_requests_cpu=100m
-#openshift_builddefaults_resources_requests_memory=256Mi
-#openshift_builddefaults_resources_limits_cpu=1000m
-#openshift_builddefaults_resources_limits_memory=512Mi
-
-# Or you may optionally define your own build defaults configuration serialized as json
-#openshift_builddefaults_json='{"BuildDefaults":{"configuration":{"apiVersion":"v1","env":[{"name":"HTTP_PROXY","value":"http://proxy.example.com.redhat.com:3128"},{"name":"NO_PROXY","value":"ose3-master.example.com"}],"gitHTTPProxy":"http://proxy.example.com:3128","gitNoProxy":"ose3-master.example.com","kind":"BuildDefaultsConfig"}}}'
-
-# These options configure the BuildOverrides admission controller which injects
-# configuration into Builds.
-# See BuildOverrides documentation at
-# https://docs.openshift.org/latest/admin_guide/build_defaults_overrides.html
-#openshift_buildoverrides_force_pull=true
-#openshift_buildoverrides_image_labels=[{'name':'imagelabelname1','value':'imagelabelvalue1'}]
-#openshift_buildoverrides_nodeselectors={'nodelabel1':'nodelabelvalue1'}
-#openshift_buildoverrides_annotations={'annotationkey1':'annotationvalue1'}
-
-# Or you may optionally define your own build overrides configuration serialized as json
-#openshift_buildoverrides_json='{"BuildOverrides":{"configuration":{"apiVersion":"v1","kind":"BuildDefaultsConfig","forcePull":"true"}}}'
-
-# Enable template service broker by specifying one or more namespaces whose
-# templates will be served by the broker
-#openshift_template_service_broker_namespaces=['openshift']
-
-# masterConfig.volumeConfig.dynamicProvisioningEnabled, configurable as of 1.2/3.2, enabled by default
-#openshift_master_dynamic_provisioning_enabled=False
-
-# Admission plugin config
-#openshift_master_admission_plugin_config={"ProjectRequestLimit":{"configuration":{"apiVersion":"v1","kind":"ProjectRequestLimitConfig","limits":[{"selector":{"admin":"true"}},{"maxProjects":"1"}]}},"PodNodeConstraints":{"configuration":{"apiVersion":"v1","kind":"PodNodeConstraintsConfig"}}}
-
-# Configure usage of openshift_clock role.
-#openshift_clock_enabled=true
-
-# OpenShift Per-Service Environment Variables
-# Environment variables are added to /etc/sysconfig files for
-# each OpenShift service: node, master (api and controllers).
-# API and controllers environment variables are merged in single
-# master environments.
-#openshift_master_api_env_vars={"ENABLE_HTTP2": "true"}
-#openshift_master_controllers_env_vars={"ENABLE_HTTP2": "true"}
-#openshift_node_env_vars={"ENABLE_HTTP2": "true"}
-
-# Enable API service auditing, available as of 1.3
-#openshift_master_audit_config={"enabled": true}
-#
-# In case you want more advanced setup for the auditlog you can
-# use this line.
-# The directory in "auditFilePath" will be created if it does not
-# exist
-#openshift_master_audit_config={"enabled": true, "auditFilePath": "/var/log/openpaas-oscp-audit/openpaas-oscp-audit.log", "maximumFileRetentionDays": 14, "maximumFileSizeMegabytes": 500, "maximumRetainedFiles": 5}
-
-# Enable origin repos that point at Centos PAAS SIG, defaults to true, only used
-# by deployment_type=origin
-#openshift_enable_origin_repo=false
-
-# Validity of the auto-generated OpenShift certificates in days.
-# See also openshift_hosted_registry_cert_expire_days above.
-#
-#openshift_ca_cert_expire_days=1825
-#openshift_node_cert_expire_days=730
-#openshift_master_cert_expire_days=730
-
-# Validity of the auto-generated external etcd certificates in days.
-# Controls validity for etcd CA, peer, server and client certificates.
-#
-#etcd_ca_default_days=1825
-#
-# ServiceAccountConfig:LimitSecretReferences rejects pods that reference secrets their service accounts do not reference
-# openshift_master_saconfig_limitsecretreferences=false
-
-# Upgrade Control
-#
-# By default nodes are upgraded serially, one at a time, and all failures are
-# fatal. There is one set of variables for normal nodes and another for nodes
-# that are part of the control plane, as the number of hosts may differ
-# between those two groups.
-#openshift_upgrade_nodes_serial=1
-#openshift_upgrade_nodes_max_fail_percentage=0
-#openshift_upgrade_control_plane_nodes_serial=1
-#openshift_upgrade_control_plane_nodes_max_fail_percentage=0
-#
-# You can specify the number of nodes to upgrade at once. We do not currently
-# attempt to verify that you have capacity to drain this many nodes at once
-# so please be careful when specifying these values. You should also verify that
-# the expected number of nodes are all schedulable and ready before starting an
-# upgrade. If it's not possible to drain the requested nodes the upgrade will
-# stall indefinitely until the drain is successful.
-#
-# If you're upgrading more than one node at a time, you can specify the maximum
-# percentage of failures within the batch before the upgrade is aborted. Any
-# nodes that do fail are ignored for the rest of the playbook run, and you should
-# take care to investigate the failure and return the node to service so that
-# your cluster recovers its full capacity.
-#
-# The failure percentage must exceed the configured value before the upgrade
-# aborts; this would fail on two failures (50% > 49%)
-# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=49
-# whereas this would not
-# openshift_upgrade_nodes_serial=4 openshift_upgrade_nodes_max_fail_percentage=50
-#
-# Multiple data migrations take place and, if they fail, they will fail the upgrade.
-# You may wish to disable them or make them non-fatal.
-#
-# openshift_upgrade_pre_storage_migration_enabled=true
-# openshift_upgrade_pre_storage_migration_fatal=true
-# openshift_upgrade_post_storage_migration_enabled=true
-# openshift_upgrade_post_storage_migration_fatal=false
-
-# host group for masters
-[masters]
-ose3-master[1:3]-ansible.test.example.com
-
-[etcd]
-ose3-etcd[1:3]-ansible.test.example.com
-
-# NOTE: Containerized load balancer hosts are not yet supported; if using a global
-# containerized=true host variable, it must be set to false for these hosts.
-[lb]
-ose3-lb-ansible.test.example.com containerized=false
-
-# NOTE: Currently we require that masters be part of the SDN, which requires that they also be nodes.
-# However, in order to ensure that your masters are not burdened with running pods, you should
-# make them unschedulable by adding openshift_schedulable=False to any node that's also a master.
-[nodes]
-ose3-master[1:3]-ansible.test.example.com
-ose3-node[1:2]-ansible.test.example.com openshift_node_labels="{'region': 'primary', 'zone': 'default'}"
diff --git a/openshift-ansible.spec b/openshift-ansible.spec
index c3a477bf6..009399803 100644
--- a/openshift-ansible.spec
+++ b/openshift-ansible.spec
@@ -10,7 +10,7 @@
Name: openshift-ansible
Version: 3.7.0
-Release: 0.143.0%{?dist}
+Release: 0.190.0%{?dist}
Summary: Openshift and Atomic Enterprise Ansible
License: ASL 2.0
URL: https://github.com/openshift/openshift-ansible
@@ -21,7 +21,12 @@ Requires: ansible >= 2.3
Requires: python2
Requires: python-six
Requires: tar
-Requires: openshift-ansible-docs = %{version}
+Requires: %{name}-docs = %{version}-%{release}
+Requires: %{name}-playbooks = %{version}-%{release}
+Requires: %{name}-roles = %{version}-%{release}
+Requires: %{name}-filter-plugins = %{version}-%{release}
+Requires: %{name}-lookup-plugins = %{version}-%{release}
+Requires: %{name}-callback-plugins = %{version}-%{release}
Requires: java-1.8.0-openjdk-headless
Requires: httpd-tools
Requires: libselinux-python
@@ -64,6 +69,9 @@ rm -f %{buildroot}%{python_sitelib}/openshift_ansible/gce
mkdir -p docs/example-inventories
cp inventory/byo/* docs/example-inventories/
+# openshift-ansible-files install
+cp -rp files %{buildroot}%{_datadir}/ansible/%{name}/
+
# openshift-ansible-playbooks install
cp -rp playbooks %{buildroot}%{_datadir}/ansible/%{name}/
# remove contiv playbooks
@@ -122,6 +130,7 @@ popd
%doc README*
%license LICENSE
%dir %{_datadir}/ansible/%{name}
+%{_datadir}/ansible/%{name}/files
%{_datadir}/ansible/%{name}/library
%ghost %{_datadir}/ansible/%{name}/playbooks/common/openshift-master/library.rpmmoved
@@ -130,7 +139,7 @@ popd
# ----------------------------------------------------------------------------------
%package docs
Summary: Openshift and Atomic Enterprise Ansible documents
-Requires: %{name} = %{version}
+Requires: %{name} = %{version}-%{release}
BuildArch: noarch
%description docs
@@ -144,11 +153,11 @@ BuildArch: noarch
# ----------------------------------------------------------------------------------
%package playbooks
Summary: Openshift and Atomic Enterprise Ansible Playbooks
-Requires: %{name} = %{version}
-Requires: %{name}-roles = %{version}
-Requires: %{name}-lookup-plugins = %{version}
-Requires: %{name}-filter-plugins = %{version}
-Requires: %{name}-callback-plugins = %{version}
+Requires: %{name} = %{version}-%{release}
+Requires: %{name}-roles = %{version}-%{release}
+Requires: %{name}-lookup-plugins = %{version}-%{release}
+Requires: %{name}-filter-plugins = %{version}-%{release}
+Requires: %{name}-callback-plugins = %{version}-%{release}
BuildArch: noarch
%description playbooks
@@ -188,10 +197,10 @@ end
# openshift-ansible-roles subpackage
# ----------------------------------------------------------------------------------
Summary: Openshift and Atomic Enterprise Ansible roles
-Requires: %{name} = %{version}
-Requires: %{name}-lookup-plugins = %{version}
-Requires: %{name}-filter-plugins = %{version}
-Requires: %{name}-callback-plugins = %{version}
+Requires: %{name} = %{version}-%{release}
+Requires: %{name}-lookup-plugins = %{version}-%{release}
+Requires: %{name}-filter-plugins = %{version}-%{release}
+Requires: %{name}-callback-plugins = %{version}-%{release}
BuildArch: noarch
%description roles
@@ -206,7 +215,7 @@ BuildArch: noarch
# ----------------------------------------------------------------------------------
%package filter-plugins
Summary: Openshift and Atomic Enterprise Ansible filter plugins
-Requires: %{name} = %{version}
+Requires: %{name} = %{version}-%{release}
BuildArch: noarch
Requires: pyOpenSSL
@@ -223,7 +232,7 @@ Requires: pyOpenSSL
# ----------------------------------------------------------------------------------
%package lookup-plugins
Summary: Openshift and Atomic Enterprise Ansible lookup plugins
-Requires: %{name} = %{version}
+Requires: %{name} = %{version}-%{release}
BuildArch: noarch
%description lookup-plugins
@@ -239,7 +248,7 @@ BuildArch: noarch
# ----------------------------------------------------------------------------------
%package callback-plugins
Summary: Openshift and Atomic Enterprise Ansible callback plugins
-Requires: %{name} = %{version}
+Requires: %{name} = %{version}-%{release}
BuildArch: noarch
%description callback-plugins
@@ -256,7 +265,7 @@ BuildArch: noarch
%package -n atomic-openshift-utils
Summary: Atomic OpenShift Utilities
BuildRequires: python-setuptools
-Requires: %{name}-playbooks = %{version}
+Requires: %{name}-playbooks = %{version}-%{release}
Requires: python-click
Requires: python-setuptools
Requires: PyYAML
@@ -276,6 +285,400 @@ Atomic OpenShift Utilities includes
%changelog
+* Thu Nov 02 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.190.0
+- check presence of v2 snapshot before the migration proceeds
+ (jchaloup@redhat.com)
+- Remove delegate_to from openshift_facts within the openshift_ca role.
+ (abutcher@redhat.com)
+- Don't use possibly undefined variables in error messages
+ (tbielawa@redhat.com)
+- MTU for bootstrapping should default to openshift_node_sdn_mtu
+ (ccoleman@redhat.com)
+- Retry service account bootstrap kubeconfig creation (ccoleman@redhat.com)
+- Docker: make use of new etc/containers/registries.conf optional
+ (mgugino@redhat.com)
+- Add rules to the view ClusterRole for service catalog. (staebler@redhat.com)
+- Updating console OPENSHIFT_CONSTANTS flag for TSB (ewolinet@redhat.com)
+- GlusterFS: Fix registry storage documentation (jarrpa@redhat.com)
+- fix comment and make it visible to end-user (azagayno@redhat.com)
+- escape also custom_cors_origins (azagayno@redhat.com)
+- add comment on regexp specifics (azagayno@redhat.com)
+- escape corsAllowedOrigins regexp strings and anchor them
+ (azagayno@redhat.com)
+
+* Wed Nov 01 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.189.0
+- Stating that certificate it is required when doing SSL on ELB.
+ (kwoodson@redhat.com)
+- Ensure GCP image build instance gets cleaned up on teardown
+ (ccoleman@redhat.com)
+- Switch from bind-interfaces to bind-dynamic (sdodson@redhat.com)
+- Remove unused osm_controller_lease_ttl (mgugino@redhat.com)
+- Delete images located in a family named {{ prefix }}images
+ (ccoleman@redhat.com)
+- Use global IP to indicate node should pick DNS (ccoleman@redhat.com)
+- Remove project metadata prefixed with the cluster prefix
+ (ccoleman@redhat.com)
+- Use openshift.node.registry_url instead of oreg_url (ccoleman@redhat.com)
+- Allow master node group to wait for stable on GCP (ccoleman@redhat.com)
+- GCP cannot use AWS growpart package (ccoleman@redhat.com)
+- dnsmasq cache-size dns-forward-max change (pcameron@redhat.com)
+- Also require that we match the release (sdodson@redhat.com)
+- Add arbitrary firewall port config to master too (sdodson@redhat.com)
+- remove master.service during the non-ha to ha upgrade (jchaloup@redhat.com)
+- Removing unneeded bootstrap which moved into the product.
+ (kwoodson@redhat.com)
+- Add retry logic to docker auth credentials (mgugino@redhat.com)
+- Retry restarting journald (mgugino@redhat.com)
+- Modify StorageClass name to standard (piqin@redhat.com)
+- Give PV & PVC empty storage class to avoid being assigned default gp2
+ (mawong@redhat.com)
+- Use oc_project to ensure openshift_provisioners_project present
+ (mawong@redhat.com)
+- Fix yaml formatting (mawong@redhat.com)
+- Create default storageclass for cloudprovider openstack (piqin@redhat.com)
+- preserve the oo-install ansible_inventory_path value (rmeggins@redhat.com)
+
+* Tue Oct 31 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.188.0
+- Add dm_thin_pool for gluster use (sdodson@redhat.com)
+- Fix broken oc_secret update function (barlik@gmx.com)
+- add new clusterNetworks fields to new installs (jtanenba@redhat.com)
+- docker: Create openshift_docker_is_node_or_master variable
+ (smilner@redhat.com)
+- Correctly install cockpit (sdodson@redhat.com)
+- Glusterfs storage templates for v1.5 added (chinacoolhacker@gmail.com)
+- bug 1501599. Omit logging project from overcommit restrictions
+ (jcantril@redhat.com)
+- GlusterFS: Remove image option from heketi command (jarrpa@redhat.com)
+
+* Mon Oct 30 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.187.0
+-
+
+* Sun Oct 29 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.186.0
+-
+
+* Sat Oct 28 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.185.0
+- bug 1506073. Lower cpu request for logging when it exceeds limit
+ (jcantril@redhat.com)
+- Update the name of the service-catalog binary (staebler@redhat.com)
+- disk_availability check: include submount storage (lmeyer@redhat.com)
+
+* Fri Oct 27 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.184.0
+- cri-o: Set max log size to 50 mb (mrunalp@gmail.com)
+- cri-o: open port 10010 (gscrivan@redhat.com)
+- bug 1435144. Remove uneeded upgrade in openshift_logging role
+ (jcantril@redhat.com)
+- Remove inadvertently committed inventory file (rteague@redhat.com)
+- crio: restorcon /var/lib/containers (smilner@redhat.com)
+- Correct openshift_release regular expression (rteague@redhat.com)
+- crio: Add failed_when to overlay check (smilner@redhat.com)
+- docker: set credentials when using system container (gscrivan@redhat.com)
+- Change dnsmasq to bind-interfaces + except-interfaces (mgugino@redhat.com)
+- Fix CA Bundle passed to service-catalog broker for ansible-service-broker
+ (staebler@redhat.com)
+- Renaming csr to bootstrap for consistency. (kwoodson@redhat.com)
+- Add master config upgrade hook to upgrade-all plays (mgugino@redhat.com)
+- Remove 'Not Started' status from playbook checkpoint (rteague@redhat.com)
+- Force include_role to static for loading openshift_facts module
+ (rteague@redhat.com)
+- Make openshift-ansible depend on all subpackages (sdodson@redhat.com)
+- Refactor health check playbooks (rteague@redhat.com)
+
+* Fri Oct 27 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.183.0
+-
+
+* Thu Oct 26 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.182.0
+- Fixing documentation for the cert_key_path variable name.
+ (kwoodson@redhat.com)
+- Moving removal of unwanted artifacts to image_prep. (kwoodson@redhat.com)
+- Ensure journald persistence directories exist (mgugino@redhat.com)
+- Fix lint (tbielawa@redhat.com)
+- Move add_many_container_providers.yml to playbooks/byo/openshift-management
+ with a noop task include to load filter plugins. (abutcher@redhat.com)
+- Refactor adding multiple container providers (tbielawa@redhat.com)
+- Management Cleanup and Provider Integration (tbielawa@redhat.com)
+
+* Thu Oct 26 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.181.0
+- Fix loop_var warnings during logging install (mgugino@redhat.com)
+- Fix typo and add detailed comments in kuryr (sngchlko@gmail.com)
+
+* Thu Oct 26 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.179.0
+- Remove pause from master service startup (rteague@redhat.com)
+- Change default in prometheus storage type to emptydir (zgalor@redhat.com)
+- Bug 1491636 - honor node selectors (jwozniak@redhat.com)
+- Sync latest imagestreams and templates (sdodson@redhat.com)
+- Remove base package install (mgugino@redhat.com)
+- etcd: remove hacks for the system container (gscrivan@redhat.com)
+- Ensure deployment_subtype is set within openshift_sanitize_inventory.
+ (abutcher@redhat.com)
+- Add installer checkpoint for prometheus (zgalor@redhat.com)
+- Remove unused registry_volume_claim variable (hansmi@vshn.ch)
+
+* Wed Oct 25 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.178.0
+- Split prometheus image defaults to prefix and version (zgalor@redhat.com)
+- Remove extraneous spaces that yamllint dislikes (staebler@redhat.com)
+- Fix edit and admin role patching for service catalog (staebler@redhat.com)
+- strip dash when comparing version with Python3 (jchaloup@redhat.com)
+- Bug 1452939 - change Logging & Metrics imagePullPolicy (jwozniak@redhat.com)
+- Remove role bindings during service catalog un-install (staebler@redhat.com)
+- Fix a few small issues in service catalog uninstall (staebler@redhat.com)
+- Remove incorrect validation for OpenIDIdentityProvider (mgugino@redhat.com)
+- Enable oreg_auth credential replace during upgrades (mgugino@redhat.com)
+- Handle bootstrap behavior in GCP template (ccoleman@redhat.com)
+- Ensure upgrades apply latest journald settings (mgugino@redhat.com)
+
+* Tue Oct 24 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.177.0
+- Check if the master service is non-ha or not (jchaloup@redhat.com)
+- Correct host group for controller restart (rteague@redhat.com)
+- Set the proper external etcd ip address when migrating embeded etcd
+ (jchaloup@redhat.com)
+- Switch to stateful set in prometheus (zgalor@redhat.com)
+- cli: use the correct name for the master system container
+ (gscrivan@redhat.com)
+- cli: do not pull again the image when using Docker (gscrivan@redhat.com)
+- verstion_gte seems unreliable on containerized installs (sdodson@redhat.com)
+- Retry reconcile in case of error and give up eventually (simo@redhat.com)
+- Updating ocp es proxy image to use openshift_logging_proxy_image_prefix if
+ specified (ewolinet@redhat.com)
+- Generate all internal hostnames of no_proxy (ghuang@redhat.com)
+- Add nfs variables documentation to README file (zgalor@redhat.com)
+- Avoid undefined variable in master sysconfig template (hansmi@vshn.ch)
+- Ensure proper variable templating for skopeo auth credentials
+ (mgugino@redhat.com)
+
+* Mon Oct 23 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.176.0
+- Update defaults (fabian@fabianism.us)
+- Use service-ca.crt instead of master ca.crt (fabian@fabianism.us)
+- use master cert (fabian@fabianism.us)
+- Bug 1496426 - add asb-client secret to openshift-ansible-service-broker
+ namespace (fabian@fabianism.us)
+- docker: Move enterprise registry from pkg to main (smilner@redhat.com)
+- systemcontainers: Verify atomic.conf proxy is always configured
+ (smilner@redhat.com)
+- Add variable to control whether NetworkManager hook is installed
+ (hansmi@vshn.ch)
+
+* Mon Oct 23 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.175.0
+-
+
+* Sun Oct 22 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.174.0
+-
+
+* Sun Oct 22 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.173.0
+-
+
+* Sun Oct 22 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.172.0
+-
+
+* Sat Oct 21 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.171.0
+- Use "requests" for CPU resources instead of limits
+ (peter.portante@redhat.com)
+- [bz1501271] Attempt to use ami ssh user and default to ansible_ssh_user.
+ (kwoodson@redhat.com)
+- Fix undefined variable for master upgrades (mgugino@redhat.com)
+- Adding pre check to verify clusterid is set along with cloudprovider when
+ performing upgrade. (kwoodson@redhat.com)
+
+* Fri Oct 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.170.0
+- Check for container runtime prior to restarting when updating system CA
+ trust. (abutcher@redhat.com)
+- bug 1489498. preserve replica and shard settings (jcantril@redhat.com)
+- Set servingInfo.clientCA to ca.crt during upgrade. (abutcher@redhat.com)
+
+* Fri Oct 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.169.0
+- Initial Kuryr support (mdulko@redhat.com)
+- Indentation errors (dymurray@redhat.com)
+- Bug 1503233 - Add liveness and readiness probe checks to ASB deploymentconfig
+ (dymurray@redhat.com)
+
+* Fri Oct 20 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.168.0
+-
+
+* Thu Oct 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.167.0
+-
+
+* Thu Oct 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.166.0
+-
+
+* Thu Oct 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.165.0
+-
+
+* Thu Oct 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.164.0
+- Change to service-signer.crt for template_service_broker CA_BUNDLE
+ (staebler@redhat.com)
+- Use service-signer.crt for ca_bundle passed to clusterservicebroker
+ (staebler@redhat.com)
+- Rename ServiceBroker to ClusterServiceBroker for ansible_service_broker task.
+ (staebler@redhat.com)
+- Add apiserver.crt to service-catalog controller-manager deployment.
+ (staebler@redhat.com)
+- Remove redundant faulty role binding ifrom
+ kubeservicecatalog_roles_bindings.yml (staebler@redhat.com)
+- Update service catalog playbook for service-catalog rc1 (staebler@redhat.com)
+
+* Thu Oct 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.163.0
+- set use_manageiq as default (efreiber@redhat.com)
+
+* Thu Oct 19 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.162.0
+- Wait longer for stable GCP instances (ccoleman@redhat.com)
+- Remove unneeded master config updates during upgrades (mgugino@redhat.com)
+
+* Wed Oct 18 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.161.0
+-
+
+* Wed Oct 18 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.160.0
+- Fix pvc selector default to be empty dict instead of string
+ (zgalor@redhat.com)
+- Fix typo in setting prom-proxy memory limit (zgalor@redhat.com)
+- Do not remove files for bootstrap if resolv or dns. (kwoodson@redhat.com)
+- Fix missing docker option signature-verification (mgugino@redhat.com)
+- Fix prometheus role nfs (zgalor@redhat.com)
+
+* Wed Oct 18 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.159.0
+- Updating openshift-ansible.spec file to include files dir
+ (sdodson@redhat.com)
+- Bug 1501768: fix eventrouter nodeSelector padding (jwozniak@redhat.com)
+- Reverting proxy image version to v1.0.0 to pass CI (ewolinet@redhat.com)
+- Making travis happy (ewolinet@redhat.com)
+- cri-o: error out when node is a Docker container (gscrivan@redhat.com)
+- Rewire openshift_template_service_broker_namespaces configurable
+ (jminter@redhat.com)
+- Ensure controllerConfig.serviceServingCert is correctly set during upgrade.
+ (abutcher@redhat.com)
+- Updating pattern for elasticsearch_proxy images (ewolinet@redhat.com)
+- Updating ES proxy image prefix and version to match other components
+ (ewolinet@redhat.com)
+- Add ability to set node and master imageConfig to latest (mgugino@redhat.com)
+- Restart all controllers to force reconfiguration during upgrade
+ (sdodson@redhat.com)
+
+* Tue Oct 17 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.158.0
+- Refactor openshift-management entry point (rteague@redhat.com)
+- Add switch to enable/disable container engine's audit log being stored in ES.
+ (jkarasek@redhat.com)
+
+* Mon Oct 16 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.157.0
+- data migration of embedded etcd not allowed (jchaloup@redhat.com)
+- GlusterFS: remove topology reference from deploy-heketi (jarrpa@redhat.com)
+
+* Mon Oct 16 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.156.0
+- set initial etcd cluster properly during system container scale up
+ (jchaloup@redhat.com)
+
+* Sun Oct 15 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.155.0
+-
+
+* Sat Oct 14 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.154.0
+-
+
+* Fri Oct 13 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.153.0
+- default groups.oo_new_etcd_to_config to an empty list (jchaloup@redhat.com)
+
+* Fri Oct 13 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.152.0
+-
+
+* Fri Oct 13 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.151.0
+- updated dynamic provision section for openshift metrics to support storage
+ class name (elvirkuric@gmail.com)
+
+* Fri Oct 13 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.150.0
+- Ensure upgrade playbook exits on health check failures (rteague@redhat.com)
+- Ensure docker is installed for containerized load balancers
+ (mgugino@redhat.com)
+- Fix containerized node service unit placement order (mgugino@redhat.com)
+- Provisioning Documentation Updates (mgugino@redhat.com)
+
+* Thu Oct 12 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.149.0
+- Fix broken debug_level (mgugino@redhat.com)
+- Ensure host was reached for proper conditional validation
+ (rteague@redhat.com)
+- Ensure docker service status actually changes (mgugino@redhat.com)
+- Display warnings at the end of the control plane upgrade (sdodson@redhat.com)
+- Force reconciliation of role for 3.6 (simo@redhat.com)
+- Remove etcd health check (sdodson@redhat.com)
+- migrate embedded etcd to external etcd (jchaloup@redhat.com)
+
+* Wed Oct 11 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.148.0
+- Bug 1490647 - logging-fluentd deployed with openshift_logging_use_mux=false
+ fails to start due to missing (nhosoi@redhat.com)
+- Fix typo in inventory example (rteague@redhat.com)
+- Separate tuned daemon setup into a role. (jmencak@redhat.com)
+- crio, docker: expect openshift_release to have 'v' (gscrivan@redhat.com)
+- rebase on master (maxamillion@fedoraproject.org)
+- Add fedora compatibility (maxamillion@fedoraproject.org)
+- Allow checkpoint status to work across all groups (rteague@redhat.com)
+- Add valid search when search does not exist on resolv.conf
+ (nakayamakenjiro@gmail.com)
+
+* Tue Oct 10 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.147.0
+- Add PartOf to docker systemd service unit. (mgugino@redhat.com)
+- crio: use systemd manager (gscrivan@redhat.com)
+- Ensure servingInfo.clientCA is set as ca.crt rather than ca-bundle.crt.
+ (abutcher@redhat.com)
+- crio, docker: use openshift_release when openshift_image_tag is not used
+ (gscrivan@redhat.com)
+- crio: fix typo (gscrivan@redhat.com)
+- Update registry_config.j2 (jialiu@redhat.com)
+- Update registry_config.j2 (jialiu@redhat.com)
+
+* Mon Oct 09 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.146.0
+- docker_image_availability: credentials to skopeo (mgugino@redhat.com)
+- Rename openshift_cfme role to openshift_management (tbielawa@redhat.com)
+
+* Mon Oct 09 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.145.0
+- add missing restart node handler to flannel (jchaloup@redhat.com)
+- Switch to configmap leader election on 3.7 upgrade (mkhan@redhat.com)
+- crio.conf.j2: sync from upstream (gscrivan@redhat.com)
+- cri-o: use overlay instead of overlay2 (gscrivan@redhat.com)
+- Ensure docker is restarted when iptables is restarted (mgugino@redhat.com)
+- Stop including origin and ose hosts example file (sdodson@redhat.com)
+- node: make node service PartOf=openvswitch.service when openshift-sdn is used
+ (dcbw@redhat.com)
+
+* Fri Oct 06 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.144.0
+- fix typo for default in etcd (mgugino@redhat.com)
+- Bumping version of service catalog image for 3.7 (ewolinet@redhat.com)
+- remove duplicate [OSEv3:children] group (jfchevrette@gmail.com)
+- Fix lint error (tbielawa@redhat.com)
+- Update hosts.ose.example (ephillipe@gmail.com)
+- Remove the no-longer-used App/DB pv size override variables from inventories
+ (tbielawa@redhat.com)
+- openshift_checks: lb and nfs do not need docker (lmeyer@redhat.com)
+- openshift_checks: use oo group names everywhere (lmeyer@redhat.com)
+- Add notes about SA token. Improve NFS validation. (tbielawa@redhat.com)
+- Hooks for installing CFME during full openshift installation
+ (tbielawa@redhat.com)
+- Documentation (tbielawa@redhat.com)
+- Import upstream templates. Do the work. Validate parameters.
+ (tbielawa@redhat.com)
+- CFME 4.6 work begins. CFME 4.5 references added to the release-3.6 branch
+ (tbielawa@redhat.com)
+- Update hosts.origin.example (ephillipe@gmail.com)
+- Add logging es prometheus endpoint (jcantril@redhat.com)
+- bug 1497401. Default logging and metrics images to 3.7 (jcantril@redhat.com)
+- Ensure docker service started prior to credentials (mgugino@redhat.com)
+- Adding support for an inventory directory/hybrid inventory
+ (esauer@redhat.com)
+- Remove unused tasks file in openshift_named_certificates (rteague@redhat.com)
+- Move node cert playbook into node config path (rteague@redhat.com)
+- Move master cert playbooks into master config path (rteague@redhat.com)
+- Move etcd cert playbooks into etcd config path (rteague@redhat.com)
+- Fix hosted selector variable migration (mgugino@redhat.com)
+- Bug 1496271 - Perserve SCC for ES local persistent storage
+ (jcantril@redhat.com)
+- Limit hosts that run openshift_version role (mgugino@redhat.com)
+- Update ansible-service-broker config to track latest broker
+ (fabian@fabianism.us)
+- fix master-facts for provisioning (mgugino@redhat.com)
+- Make provisioning steps more reusable (mgugino@redhat.com)
+- logging: honor openshift_logging_es_cpu_limit (jwozniak@redhat.com)
+- Addressing tox issues (ewolinet@redhat.com)
+- bug 1482661. Preserve ES dc nodeSelector and supplementalGroups
+ (jcantril@redhat.com)
+- Checking if any openshift_*_storage_kind variables are set to dynamic without
+ enabling dynamic provisioning (ewolinet@redhat.com)
+- Removing setting pvc size and dynamic to remove looped var setting
+ (ewolinet@redhat.com)
+
* Wed Oct 04 2017 Jenkins CD Merge Bot <smunilla@redhat.com> 3.7.0-0.143.0
- Limit base-package install during master upgrades (mgugino@redhat.com)
- Fix provisiong scale group and elb logic (mgugino@redhat.com)
diff --git a/playbooks/aws/BUILD_AMI.md b/playbooks/aws/BUILD_AMI.md
new file mode 100644
index 000000000..468264a9a
--- /dev/null
+++ b/playbooks/aws/BUILD_AMI.md
@@ -0,0 +1,21 @@
+# Build AMI
+
+When building a custom AMI for your OpenShift nodes, the build_ami.yml play
+walks through the following steps:
+
+1. Create an instance, using a specified ssh key.
+2. Run openshift-ansible setup roles to ensure packages and services are correctly configured.
+3. Create the AMI.
+4. If encryption is desired:
+ - A KMS key is created with the name of $clusterid
+ - An encrypted AMI will be produced with the $clusterid KMS key
+5. Terminate the instance used to configure the AMI.
+
+More AMI-specific options can be found in ['openshift_aws/defaults/main.yml'](../../roles/openshift_aws/defaults/main.yml). When creating an encrypted AMI, enable encryption:
+```
+# openshift_aws_ami_encrypt: True # defaults to false
+```
+
+**Note**: When enabled, this takes the newly created AMI and encrypts it for later use. If encryption is not desired, leave the value set to false (the default). The AMI id will be fetched and used according to its most recent creation date.
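+
+As a minimal sketch, a `provisioning_vars.yml` for an encrypted AMI build could
+look like the following (every value below is a placeholder):
+
+```yaml
+# Sketch only: substitute values appropriate to your account and repositories.
+openshift_deployment_type: origin       # or 'openshift-enterprise'
+openshift_release: v3.7
+openshift_pkg_version: -3.7.0
+openshift_aws_ssh_key_name: myuser_key
+openshift_aws_base_ami: ami-12345678    # base image with access to openshift repos
+openshift_aws_ami_encrypt: True         # encrypt the resulting AMI with the $clusterid KMS key
+```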
diff --git a/playbooks/aws/PREREQUISITES.md b/playbooks/aws/PREREQUISITES.md
new file mode 100644
index 000000000..4f428dcc3
--- /dev/null
+++ b/playbooks/aws/PREREQUISITES.md
@@ -0,0 +1,40 @@
+# Prerequisites
+
+When seeking to deploy a working openshift cluster using these plays, a few
+items must be in place.
+
+These are:
+
+1) A VPC
+2) A security group to build the AMI in
+3) SSH keys to log into instances
+
+These items can be provisioned ahead of time, or you can utilize the plays here
+to create these items.
+
+If you wish to provision these items yourself, or you already have these items
+provisioned and wish to utilize existing components, please refer to
+provisioning_vars.yml.example.
+
+If you wish to have these items created for you, continue with this document.
+
+# Running prerequisites.yml
+
+Warning: Running these plays will provision items in your AWS account (if not
+present), and you may incur billing charges. These plays are not suitable
+for the free-tier.
+
+## Step 1:
+Ensure you have specified all the necessary provisioning variables. See
+provisioning_vars.yml.example and README.md for more information.
+
+## Step 2:
+```
+$ ansible-playbook -i inventory.yml prerequisites.yml -e @provisioning_vars.yml
+```
+
+This will create a VPC, a security group, and an SSH key pair. These plays are
+idempotent, and multiple runs should result in no additional provisioning of
+these components.
+
+You can also verify that you will successfully utilize existing components with
+these plays.
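+
+If you would rather point these plays at pre-existing components, a sketch of
+the relevant overrides in provisioning_vars.yml looks like this (all names are
+placeholders):
+
+```yaml
+# Sketch only: reuse existing AWS components instead of provisioning new ones.
+openshift_aws_create_vpc: False
+openshift_aws_vpc_name: my-existing-vpc
+openshift_aws_subnet_name: my-existing-subnet
+openshift_aws_create_security_groups: False
+openshift_aws_ssh_key_name: myuser_key  # an already-uploaded key pair
+```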
diff --git a/playbooks/aws/README.md b/playbooks/aws/README.md
index 816cb35b4..417fb539a 100644
--- a/playbooks/aws/README.md
+++ b/playbooks/aws/README.md
@@ -8,6 +8,13 @@ With recent desire for provisioning from customers and developers alike, the AWS
deploy highly scalable Openshift clusters utilizing AWS auto scale groups and
custom AMIs.
+To speed up the provisioning of medium and large clusters, openshift-node
+instances are created using a pre-built AMI. A list of pre-built AMIs will
+be available soon.
+
+If the deployer wishes to build their own AMI for provisioning, instructions
+to do so are provided here.
+
### Where do I start?
Before any provisioning may occur, AWS account credentials must be present in the environment. This can be done in two ways:
@@ -31,8 +38,13 @@ Before any provisioning may occur, AWS account credentials must be present in th
### Let's Provision!
-The newly added playbooks are the following:
-- build_ami.yml - Builds a custom AMI. This currently requires the user to supply a valid AMI with access to repositories that contain openshift repositories.
+Warning: Running these plays will provision items in your AWS account (if not
+present), and you may incur billing charges. These plays are not suitable
+for the free-tier.
+
+#### High-level overview
+- prerequisites.yml - Provision VPC, Security Groups, SSH keys, if needed. See PREREQUISITES.md for more information.
+- build_ami.yml - Builds a custom AMI. See BUILD_AMI.md for more information.
- provision.yml - Create a vpc, elbs, security groups, launch config, asg's, etc.
- install.yml - Calls the openshift-ansible installer on the newly created instances
- provision_nodes.yml - Creates the infra and compute node scale groups
@@ -41,82 +53,39 @@ The newly added playbooks are the following:
The current expected work flow should be to provide an AMI with access to Openshift repositories. There should be a repository specified in the `openshift_additional_repos` parameter of the inventory file. The next expectation is a minimal set of values in the `provisioning_vars.yml` file to configure the desired settings for cluster instances. These settings are AWS specific and should be tailored to the consumer's AWS custom account settings.
+Values specified in provisioning_vars.yml may instead be specified in your inventory group_vars
+under the appropriate groups. Most variables can exist in the 'all' group.
+
```yaml
---
-# when creating an AMI set this to True
-# when installing a cluster set this to False
-openshift_node_bootstrap: True
-
-# specify a clusterid
-# openshift_aws_clusterid: default
-
-# specify a region
-# openshift_aws_region: us-east-1
-
-# must specify a base_ami when building an AMI
-# openshift_aws_base_ami: # base image for AMI to build from
-# specify when using a custom AMI
-# openshift_aws_ami:
-
-# when creating an encrypted AMI please specify use_encryption
-# openshift_aws_ami_encrypt: False
-
-# custom certificates are required for the ELB
-# openshift_aws_iam_cert_path: '/path/to/cert/wildcard.<clusterid>.<domain>.com.crt'
-# openshift_aws_iam_cert_key_path: '/path/to/key/wildcard.<clusterid>.<domain>.com.key'
-# openshift_aws_iam_cert_chain_path: '/path/to/ca_cert_file/ca.crt'
-
-# This is required for any ec2 instances
-# openshift_aws_ssh_key_name: myuser_key
-
-# This will ensure these users are created
-#openshift_aws_users:
-#- key_name: myuser_key
-# username: myuser
-# pub_key: |
-# ssh-rsa AAAA
+# Minimum mandatory provisioning variables. See provisioning_vars.yml.example.
+# for more information.
+openshift_deployment_type: # 'origin' or 'openshift-enterprise'
+openshift_release: # example: v3.7
+openshift_pkg_version: # example: -3.7.0
+openshift_aws_ssh_key_name: # example: myuser_key
+openshift_aws_base_ami: # example: ami-12345678
+# These are required when doing SSL on the ELBs
+openshift_aws_iam_cert_path: # example: '/path/to/wildcard.<clusterid>.example.com.crt'
+openshift_aws_iam_cert_key_path: # example: '/path/to/wildcard.<clusterid>.example.com.key'
```
If customization is required for the instances, scale groups, or any other configurable option please see the ['openshift_aws/defaults/main.yml'](../../roles/openshift_aws/defaults/main.yml) for variables and overrides. These overrides can be placed in the `provisioning_vars.yml`, `inventory`, or `group_vars`.
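+
+For example, a few common overrides in `provisioning_vars.yml` might look like
+this sketch (values are placeholders; consult the role defaults for the full
+list):
+
+```yaml
+# Sketch only: optional overrides of roles/openshift_aws/defaults/main.yml.
+openshift_aws_clusterid: mycluster  # also the default name for many other components
+openshift_aws_region: us-west-2     # the plays default to us-east-1
+openshift_aws_ami_encrypt: True     # build and use an encrypted AMI
+```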
-In order to create the bootstrap-able AMI we need to create an openshift-ansible inventory file. This file enables us to create the AMI using the openshift-ansible node roles. The exception here is that there will be no hosts specified by the inventory file. Here is an example:
-
-```ini
-[OSEv3:children]
-masters
-nodes
-etcd
-
-[OSEv3:vars]
-################################################################################
-# Ensure these variables are set for bootstrap
-################################################################################
-# openshift_deployment_type is required for installation
-openshift_deployment_type=origin
+In order to create the bootstrap-able AMI we need to create a basic openshift-ansible inventory. This enables us to create the AMI using the openshift-ansible node roles. This inventory should not include any hosts, but certain variables should be defined in the appropriate groups, just as when deploying a cluster
+using the normal openshift-ansible method. See provisioning-inventory.example.ini for an example.
-# required when building an AMI. This will
-# be dependent on the version provided by the yum repository
-openshift_pkg_version=-3.6.0
-
-openshift_master_bootstrap_enabled=True
-
-openshift_hosted_router_wait=False
-openshift_hosted_registry_wait=False
-
-# Repository for installation
-openshift_additional_repos=[{'name': 'openshift-repo', 'id': 'openshift-repo', 'baseurl': 'https://mirror.openshift.com/enterprise/enterprise-3.6/latest/x86_64/os/', 'enabled': 'yes', 'gpgcheck': 0, 'sslverify': 'no', 'sslclientcert': '/var/lib/yum/client-cert.pem', 'sslclientkey': '/var/lib/yum/client-key.pem', 'gpgkey': 'https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-release https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-beta https://mirror.ops.rhcloud.com/libra/keys/RPM-GPG-KEY-redhat-openshifthosted'}]
-
-################################################################################
-# cluster specific settings may be placed here
+There are more examples of cluster inventory settings [`here`](../../inventory/byo/).
-[masters]
+#### Step 0 (optional)
-[etcd]
+You may provision a VPC, security group, and SSH key pair in which to build the AMI.
-[nodes]
+```
+$ ansible-playbook -i inventory.yml prerequisites.yml -e @provisioning_vars.yml
```
-There are more examples of cluster inventory settings [`here`](../../inventory/byo/).
+See PREREQUISITES.md for more information.
#### Step 1
@@ -126,24 +95,6 @@ Once the `inventory` and the `provisioning_vars.yml` file has been updated with
$ ansible-playbook -i inventory.yml build_ami.yml -e @provisioning_vars.yml
```
-1. This script will build a VPC. Default name will be clusterid if not specified.
-2. Create an ssh key required for the instance.
-3. Create a security group.
-4. Create an instance using the key from step 2 or a specified key.
-5. Run openshift-ansible setup roles to ensure packages and services are correctly configured.
-6. Create the AMI.
-7. If encryption is desired
- - A KMS key is created with the name of $clusterid
- - An encrypted AMI will be produced with $clusterid KMS key
-8. Terminate the instance used to configure the AMI.
-
-More AMI specific options can be found in ['openshift_aws/defaults/main.yml'](../../roles/openshift_aws/defaults/main.yml). When creating an encrypted AMI please specify use_encryption:
-```
-# openshift_aws_ami_encrypt: True # defaults to false
-```
-
-**Note**: This will ensure to take the recently created AMI and encrypt it to be used later. If encryption is not desired then set the value to false (defaults to false). The AMI id will be fetched and used according to its most recent creation date.
-
#### Step 2
Now that we have created an AMI for our Openshift installation, there are two ways to use the AMI.
@@ -167,16 +118,14 @@ $ ansible-playbook provision.yml -e @provisioning_vars.yml
```
This playbook runs through the following steps:
-1. Ensures a VPC is created.
-2. Ensures a SSH key exists.
-3. Creates an s3 bucket for the registry named $clusterid-docker-registry
-4. Create master security groups.
-5. Create a master launch config.
-6. Create the master auto scaling groups.
-7. If certificates are desired for ELB, they will be uploaded.
-8. Create internal and external master ELBs.
-9. Add newly created masters to the correct groups.
-10. Set a couple of important facts for the masters.
+1. Creates an s3 bucket for the registry named $clusterid-docker-registry
+2. Create master security groups.
+3. Create a master launch config.
+4. Create the master auto scaling groups.
+5. If certificates are desired for ELB, they will be uploaded.
+6. Create internal and external master ELBs.
+7. Add newly created masters to the correct groups.
+8. Set a couple of important facts for the masters.
At this point we have successfully created the infrastructure including the master nodes.
@@ -195,13 +144,13 @@ Once this playbook completes, the cluster masters should be installed and config
#### Step 5
-Now that we have a cluster deployed it will be more interesting to create some node types. This can be done easily with the following playbook:
+Now that we have the cluster masters deployed, we need to deploy our infrastructure and compute nodes:
```
$ ansible-playbook provision_nodes.yml -e @provisioning_vars.yml
```
-Once this playbook completes, it should create the compute and infra node scale groups. These nodes will attempt to register themselves to the cluster. These requests must be approved by an administrator.
+Once this playbook completes, it should create the compute and infra node scale groups. These nodes will attempt to register themselves to the cluster. These requests must be approved by an administrator in Step 6.
#### Step 6
diff --git a/playbooks/aws/openshift-cluster/accept.yml b/playbooks/aws/openshift-cluster/accept.yml
index ffc367f9f..c2c8bea50 100755
--- a/playbooks/aws/openshift-cluster/accept.yml
+++ b/playbooks/aws/openshift-cluster/accept.yml
@@ -42,12 +42,12 @@
until: "'instances' in instancesout and instancesout.instances|length > 0"
- debug:
- msg: "{{ instancesout.instances|map(attribute='private_dns_name') | list | regex_replace('.ec2.internal') }}"
+ msg: "{{ instancesout.instances|map(attribute='private_dns_name') | list }}"
- name: approve nodes
oc_adm_csr:
#approve_all: True
- nodes: "{{ instancesout.instances|map(attribute='private_dns_name') | list | regex_replace('.ec2.internal') }}"
- timeout: 0
+ nodes: "{{ instancesout.instances|map(attribute='private_dns_name') | list }}"
+ timeout: 60
register: nodeout
delegate_to: "{{ mastersout.instances[0].public_ip_address }}"
diff --git a/playbooks/aws/openshift-cluster/build_ami.yml b/playbooks/aws/openshift-cluster/build_ami.yml
index 1e54f0467..fae30eb0a 100644
--- a/playbooks/aws/openshift-cluster/build_ami.yml
+++ b/playbooks/aws/openshift-cluster/build_ami.yml
@@ -17,71 +17,24 @@
- name: openshift_aws_region
msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
- - name: create an instance and prepare for ami
- include_role:
- name: openshift_aws
- tasks_from: build_ami.yml
- vars:
- openshift_aws_node_group_type: compute
-
- - name: fetch newly created instances
- ec2_remote_facts:
- region: "{{ openshift_aws_region | default('us-east-1') }}"
- filters:
- "tag:Name": "{{ openshift_aws_base_ami_name | default('ami_base') }}"
- instance-state-name: running
- register: instancesout
- retries: 20
- delay: 3
- until: instancesout.instances|length > 0
-
- - name: wait for ssh to become available
- wait_for:
- port: 22
- host: "{{ instancesout.instances[0].public_ip_address }}"
- timeout: 300
- search_regex: OpenSSH
-
- - name: add host to nodes
- add_host:
- groups: nodes
- name: "{{ instancesout.instances[0].public_dns_name }}"
+- include: provision_instance.yml
+ vars:
+ openshift_aws_node_group_type: compute
- hosts: nodes
gather_facts: False
tasks:
- name: set the user to perform installation
set_fact:
- ansible_ssh_user: "{{ openshift_aws_build_ami_ssh_user | default('root') }}"
-
-- name: normalize groups
- include: ../../byo/openshift-cluster/initialize_groups.yml
-
-- name: run the std_include
- include: ../../common/openshift-cluster/evaluate_groups.yml
-
-- name: run the std_include
- include: ../../common/openshift-cluster/initialize_facts.yml
-
-- name: run the std_include
- include: ../../common/openshift-cluster/initialize_openshift_repos.yml
-
-- name: run node config setup
- include: ../../common/openshift-node/setup.yml
-
-- name: run node config
- include: ../../common/openshift-node/configure_nodes.yml
-
-- name: Re-enable excluders
- include: ../../common/openshift-node/enable_excluders.yml
-
-- hosts: localhost
- connection: local
- become: no
- tasks:
- - name: seal the ami
- include_role:
- name: openshift_aws
- tasks_from: seal_ami.yml
- vars:
- openshift_aws_ami_name: "openshift-gi-{{ lookup('pipe', 'date +%Y%m%d%H%M')}}"
+ ansible_ssh_user: "{{ openshift_aws_build_ami_ssh_user | default(ansible_ssh_user) }}"
+ openshift_node_bootstrap: True
+ openshift_node_image_prep_packages:
+ - cloud-utils-growpart
+
+# This is the part that installs all of the software and configs for the instance
+# to become a node.
+- include: ../../common/openshift-node/image_prep.yml
+
+- include: seal_ami.yml
+ vars:
+ openshift_aws_ami_name: "openshift-gi-{{ lookup('pipe', 'date +%Y%m%d%H%M')}}"
diff --git a/playbooks/aws/openshift-cluster/install.yml b/playbooks/aws/openshift-cluster/install.yml
index 86d58a68e..4d0bf9531 100644
--- a/playbooks/aws/openshift-cluster/install.yml
+++ b/playbooks/aws/openshift-cluster/install.yml
@@ -1,68 +1,19 @@
---
-- name: Setup the vpc and the master node group
+- name: Setup the master node group
hosts: localhost
tasks:
- - name: Alert user to variables needed - clusterid
- debug:
- msg: "openshift_aws_clusterid={{ openshift_aws_clusterid | default('default') }}"
-
- - name: Alert user to variables needed - region
- debug:
- msg: "openshift_aws_region={{ openshift_aws_region | default('us-east-1') }}"
-
- - name: fetch newly created instances
- ec2_remote_facts:
- region: "{{ openshift_aws_region | default('us-east-1') }}"
- filters:
- "tag:clusterid": "{{ openshift_aws_clusterid | default('default') }}"
- "tag:host-type": master
- instance-state-name: running
- register: instancesout
- retries: 20
- delay: 3
- until: instancesout.instances|length > 0
-
- - name: add new master to masters group
- add_host:
- groups: "masters,etcd,nodes"
- name: "{{ item.public_ip_address }}"
- hostname: "{{ openshift_aws_clusterid | default('default') }}-master-{{ item.id[:-5] }}"
- with_items: "{{ instancesout.instances }}"
-
- - name: wait for ssh to become available
- wait_for:
- port: 22
- host: "{{ item.public_ip_address }}"
- timeout: 300
- search_regex: OpenSSH
- with_items: "{{ instancesout.instances }}"
+ - include_role:
+ name: openshift_aws
+ tasks_from: setup_master_group.yml
- name: set the master facts for hostname to elb
hosts: masters
gather_facts: no
remote_user: root
tasks:
- - name: fetch elbs
- ec2_elb_facts:
- region: "{{ openshift_aws_region | default('us-east-1') }}"
- names:
- - "{{ item }}"
- with_items:
- - "{{ openshift_aws_clusterid | default('default') }}-master-external"
- - "{{ openshift_aws_clusterid | default('default') }}-master-internal"
- delegate_to: localhost
- register: elbs
-
- - debug: var=elbs
-
- - name: set fact
- set_fact:
- openshift_master_cluster_hostname: "{{ elbs.results[1].elbs[0].dns_name }}"
- osm_custom_cors_origins:
- - "{{ elbs.results[1].elbs[0].dns_name }}"
- - "console.{{ openshift_aws_clusterid | default('default') }}.openshift.com"
- - "api.{{ openshift_aws_clusterid | default('default') }}.openshift.com"
- with_items: "{{ groups['masters'] }}"
+ - include_role:
+ name: openshift_aws
+ tasks_from: master_facts.yml
- name: normalize groups
include: ../../byo/openshift-cluster/initialize_groups.yml
diff --git a/playbooks/aws/openshift-cluster/prerequisites.yml b/playbooks/aws/openshift-cluster/prerequisites.yml
new file mode 100644
index 000000000..df77fe3bc
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/prerequisites.yml
@@ -0,0 +1,8 @@
+---
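+# Example invocation (from PREREQUISITES.md):
+#   ansible-playbook -i inventory.yml prerequisites.yml -e @provisioning_vars.yml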
+- include: provision_vpc.yml
+
+- include: provision_ssh_keypair.yml
+
+- include: provision_sec_group.yml
+ vars:
+ openshift_aws_node_group_type: compute
diff --git a/playbooks/aws/openshift-cluster/provision.yml b/playbooks/aws/openshift-cluster/provision.yml
index 8f018abd0..4b5bd22ea 100644
--- a/playbooks/aws/openshift-cluster/provision.yml
+++ b/playbooks/aws/openshift-cluster/provision.yml
@@ -1,5 +1,5 @@
---
-- name: Setup the vpc and the master node group
+- name: Setup the elb and the master node group
hosts: localhost
tasks:
diff --git a/playbooks/aws/openshift-cluster/provision_instance.yml b/playbooks/aws/openshift-cluster/provision_instance.yml
new file mode 100644
index 000000000..6e843453c
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/provision_instance.yml
@@ -0,0 +1,12 @@
+---
+# If running this play directly, be sure the variable
+# 'openshift_aws_node_group_type' is set correctly for your usage.
+# See build_ami.yml for an example.
+- hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: create an instance and prepare for ami
+ include_role:
+ name: openshift_aws
+ tasks_from: provision_instance.yml
diff --git a/playbooks/aws/openshift-cluster/provision_sec_group.yml b/playbooks/aws/openshift-cluster/provision_sec_group.yml
new file mode 100644
index 000000000..039357adb
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/provision_sec_group.yml
@@ -0,0 +1,13 @@
+---
+# If running this play directly, be sure the variable
+# 'openshift_aws_node_group_type' is set correctly for your usage.
+# See build_ami.yml for an example.
+- hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: create the security group
+ include_role:
+ name: openshift_aws
+ tasks_from: security_group.yml
+ when: openshift_aws_create_security_groups | default(True) | bool
diff --git a/playbooks/aws/openshift-cluster/provision_ssh_keypair.yml b/playbooks/aws/openshift-cluster/provision_ssh_keypair.yml
new file mode 100644
index 000000000..3ec683958
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/provision_ssh_keypair.yml
@@ -0,0 +1,12 @@
+---
+- hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: create the ssh key pair
+ include_role:
+ name: openshift_aws
+ tasks_from: ssh_keys.yml
+ vars:
+ openshift_aws_node_group_type: compute
+ when: openshift_aws_users | default([]) | length > 0
diff --git a/playbooks/aws/openshift-cluster/provision_vpc.yml b/playbooks/aws/openshift-cluster/provision_vpc.yml
new file mode 100644
index 000000000..0a23a6d32
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/provision_vpc.yml
@@ -0,0 +1,10 @@
+---
+- hosts: localhost
+ connection: local
+ gather_facts: no
+ tasks:
+ - name: create a vpc
+ include_role:
+ name: openshift_aws
+ tasks_from: vpc.yml
+ when: openshift_aws_create_vpc | default(True) | bool
diff --git a/playbooks/aws/openshift-cluster/provisioning_vars.example.yml b/playbooks/aws/openshift-cluster/provisioning_vars.example.yml
deleted file mode 100644
index 28eb9c993..000000000
--- a/playbooks/aws/openshift-cluster/provisioning_vars.example.yml
+++ /dev/null
@@ -1,28 +0,0 @@
----
-# when creating an AMI set this option to True
-# when installing the cluster, set this to False
-openshift_node_bootstrap: True
-
-# specify a clusterid
-#openshift_aws_clusterid: default
-
-# must specify a base_ami when building an AMI
-#openshift_aws_base_ami:
-
-# when creating an encrypted AMI please specify use_encryption
-#openshift_aws_ami_encrypt: False
-
-# custom certificates are required for the ELB
-#openshift_aws_iam_cert_path: '/path/to/wildcard.<clusterid>.example.com.crt'
-#openshift_aws_iam_key_path: '/path/to/wildcard.<clusterid>.example.com.key'
-#openshift_aws_iam_cert_chain_path: '/path/to/cert.ca.crt'
-
-# This is required for any ec2 instances
-#openshift_aws_ssh_key_name: myuser_key
-
-# This will ensure these users are created
-#openshift_aws_users:
-#- key_name: myuser_key
-# username: myuser
-# pub_key: |
-# ssh-rsa AAAA
diff --git a/playbooks/aws/openshift-cluster/seal_ami.yml b/playbooks/aws/openshift-cluster/seal_ami.yml
new file mode 100644
index 000000000..8239a64fb
--- /dev/null
+++ b/playbooks/aws/openshift-cluster/seal_ami.yml
@@ -0,0 +1,12 @@
+---
+# If running this play directly, be sure the variable
+# 'openshift_aws_ami_name' is set correctly for your usage.
+# See build_ami.yml for an example.
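+# A rough standalone invocation (the AMI name below is a placeholder following
+# the date-stamped pattern that build_ami.yml generates):
+#   ansible-playbook seal_ami.yml -e openshift_aws_ami_name=openshift-gi-201711021200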
+- hosts: localhost
+ connection: local
+ become: no
+ tasks:
+ - name: seal the ami
+ include_role:
+ name: openshift_aws
+ tasks_from: seal_ami.yml
diff --git a/playbooks/aws/provisioning-inventory.example.ini b/playbooks/aws/provisioning-inventory.example.ini
new file mode 100644
index 000000000..238a7eb2f
--- /dev/null
+++ b/playbooks/aws/provisioning-inventory.example.ini
@@ -0,0 +1,25 @@
+[OSEv3:children]
+masters
+nodes
+etcd
+
+[OSEv3:vars]
+################################################################################
+# Ensure these variables are set for bootstrap
+################################################################################
+# openshift_deployment_type is required for installation
+openshift_deployment_type=origin
+
+openshift_master_bootstrap_enabled=True
+
+openshift_hosted_router_wait=False
+openshift_hosted_registry_wait=False
+
+################################################################################
+# cluster specific settings may be placed here
+
+[masters]
+
+[etcd]
+
+[nodes]
diff --git a/playbooks/aws/provisioning_vars.yml.example b/playbooks/aws/provisioning_vars.yml.example
new file mode 100644
index 000000000..1491fb868
--- /dev/null
+++ b/playbooks/aws/provisioning_vars.yml.example
@@ -0,0 +1,120 @@
+---
+# Variables that are commented in this file are optional; uncommented variables
+# are mandatory.
+
+# Default values for each variable are provided, as applicable.
+# Example values for mandatory variables are provided as a comment at the end
+# of the line.
+
+# ------------------------ #
+# Common/Cluster Variables #
+# ------------------------ #
+# Variables in this section affect all areas of the cluster
+
+# Deployment type must be specified.
+openshift_deployment_type: # 'origin' or 'openshift-enterprise'
+
+# openshift_release must be specified. Use whatever version of openshift
+# that is supported by openshift-ansible that you wish.
+openshift_release: # v3.7
+
+# This will be dependent on the version provided by the yum repository
+openshift_pkg_version: # -3.7.0
+
+# specify a clusterid
+# This value is also used as the default value for many other components.
+#openshift_aws_clusterid: default
+
+# AWS region
+# This value will instruct the plays where all items should be created.
+# Multi-region deployments are not supported using these plays at this time.
+#openshift_aws_region: us-east-1
+
+#openshift_aws_create_launch_config: true
+#openshift_aws_create_scale_group: true
+
+# --- #
+# VPC #
+# --- #
+
+# openshift_aws_create_vpc defaults to true. If you don't wish to provision
+# a vpc, set this to false.
+#openshift_aws_create_vpc: true
+
+# Name of the vpc. Needs to be set if using a pre-existing vpc.
+#openshift_aws_vpc_name: "{{ openshift_aws_clusterid }}"
+
+# Name of the subnet in the vpc to use. Needs to be set if using a pre-existing
+# vpc + subnet.
+#openshift_aws_subnet_name:
+
+# -------------- #
+# Security Group #
+# -------------- #
+
+# openshift_aws_create_security_groups defaults to true. If you wish to use
+# an existing security group, set this to false.
+#openshift_aws_create_security_groups: true
+
+# openshift_aws_build_ami_group is the name of the security group to build the
+# ami in. This defaults to the value of openshift_aws_clusterid.
+#openshift_aws_build_ami_group: "{{ openshift_aws_clusterid }}"
+
+# openshift_aws_launch_config_security_groups specifies the security groups to
+# apply to the launch config. The launch config security groups will be what
+# the cluster actually is deployed in.
+#openshift_aws_launch_config_security_groups: see roles/openshift_aws/defaults/main.yml
+
+# openshift_aws_node_security_groups are created when
+# openshift_aws_create_security_groups is set to true.
+#openshift_aws_node_security_groups: see roles/openshift_aws/defaults/main.yml
+
+# -------- #
+# ssh keys #
+# -------- #
+
+# Specify the key pair name here to connect to the provisioned instances. This
+# can be an existing key, or it can be one of the keys specified in
+# openshift_aws_users
+openshift_aws_ssh_key_name: # myuser_key
+
+# This will ensure these user and public keys are created.
+#openshift_aws_users:
+#- key_name: myuser_key
+# username: myuser
+# pub_key: |
+# ssh-rsa AAAA
+
+# When building the AMI, specify the user to ssh to the instance as.
+# openshift_aws_build_ami_ssh_user: root
+
+# --------- #
+# AMI Build #
+# --------- #
+# Variables in this section apply to building a node AMI for use in your
+# openshift cluster.
+
+# must specify a base_ami when building an AMI
+openshift_aws_base_ami: # ami-12345678
+
+# when creating an encrypted AMI please specify use_encryption
+#openshift_aws_ami_encrypt: False
+
+# -- #
+# S3 #
+# -- #
+
+# Create an s3 bucket.
+#openshift_aws_create_s3: True
+
+# --- #
+# ELB #
+# --- #
+
+# openshift_aws_elb_name will be the base-name of the ELBs.
+#openshift_aws_elb_name: "{{ openshift_aws_clusterid }}"
+
+# custom certificates are required for the ELB
+openshift_aws_iam_cert_path: # '/path/to/wildcard.<clusterid>.example.com.crt'
+openshift_aws_iam_cert_key_path: # '/path/to/wildcard.<clusterid>.example.com.key'
+openshift_aws_iam_cert_chain_path: # '/path/to/cert.ca.crt'
diff --git a/playbooks/byo/openshift-cfme/uninstall.yml b/playbooks/byo/openshift-cfme/uninstall.yml
deleted file mode 100644
index c8ed16859..000000000
--- a/playbooks/byo/openshift-cfme/uninstall.yml
+++ /dev/null
@@ -1,6 +0,0 @@
----
-# - include: ../openshift-cluster/initialize_groups.yml
-# tags:
-# - always
-
-- include: ../../common/openshift-cfme/uninstall.yml
diff --git a/playbooks/byo/openshift-cluster/config.yml b/playbooks/byo/openshift-cluster/config.yml
index 60fa44c5b..f2e52782b 100644
--- a/playbooks/byo/openshift-cluster/config.yml
+++ b/playbooks/byo/openshift-cluster/config.yml
@@ -8,5 +8,3 @@
- always
- include: ../../common/openshift-cluster/config.yml
- vars:
- openshift_deployment_subtype: "{{ deployment_subtype | default(none) }}"
diff --git a/playbooks/byo/openshift-etcd/embedded2external.yml b/playbooks/byo/openshift-etcd/embedded2external.yml
new file mode 100644
index 000000000..6690a7624
--- /dev/null
+++ b/playbooks/byo/openshift-etcd/embedded2external.yml
@@ -0,0 +1,6 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+
+- include: ../../common/openshift-cluster/std_include.yml
+
+- include: ../../common/openshift-etcd/embedded2external.yml
diff --git a/playbooks/byo/openshift-management/add_container_provider.yml b/playbooks/byo/openshift-management/add_container_provider.yml
new file mode 100644
index 000000000..3378b5abd
--- /dev/null
+++ b/playbooks/byo/openshift-management/add_container_provider.yml
@@ -0,0 +1,6 @@
+---
+- include: ../openshift-cluster/initialize_groups.yml
+
+- include: ../../common/openshift-cluster/evaluate_groups.yml
+
+- include: ../../common/openshift-management/add_container_provider.yml
diff --git a/playbooks/byo/openshift-management/add_many_container_providers.yml b/playbooks/byo/openshift-management/add_many_container_providers.yml
new file mode 100644
index 000000000..62fdb11c5
--- /dev/null
+++ b/playbooks/byo/openshift-management/add_many_container_providers.yml
@@ -0,0 +1,36 @@
+---
+- hosts: localhost
+ tasks:
+ - name: Ensure the container provider configuration is defined
+ assert:
+ that: container_providers_config is defined
+ msg: |
+ Error: Must provide providers config path. Fix: Add '-e container_providers_config=/path/to/your/config' to the ansible-playbook command
+
+ - name: Include providers/management configuration
+ include_vars:
+ file: "{{ container_providers_config }}"
+
+ - name: Ensure this cluster is a container provider
+ uri:
+ url: "https://{{ management_server['hostname'] }}/api/providers"
+ body_format: json
+ method: POST
+ user: "{{ management_server['user'] }}"
+ password: "{{ management_server['password'] }}"
+ validate_certs: no
+ # Docs on formatting the BODY of the POST request:
+ # http://manageiq.org/docs/reference/latest/api/reference/providers.html#specifying-connection-configurations
+ body: "{{ item }}"
+ failed_when: false
+ with_items: "{{ container_providers }}"
+ register: results
+
+ # Include openshift_management for access to filter_plugins.
+ - include_role:
+ name: openshift_management
+ tasks_from: noop
+
+ - name: Print each result
+ debug:
+ msg: "{{ results.results | oo_filter_container_providers }}"
diff --git a/playbooks/byo/openshift-cfme/config.yml b/playbooks/byo/openshift-management/config.yml
index 0e8e7a94d..e8795ef85 100644
--- a/playbooks/byo/openshift-cfme/config.yml
+++ b/playbooks/byo/openshift-management/config.yml
@@ -1,8 +1,6 @@
---
- include: ../openshift-cluster/initialize_groups.yml
- tags:
- - always
- include: ../../common/openshift-cluster/evaluate_groups.yml
-- include: ../../common/openshift-cfme/config.yml
+- include: ../../common/openshift-management/config.yml
diff --git a/playbooks/common/openshift-cfme/roles b/playbooks/byo/openshift-management/roles
index 20c4c58cf..20c4c58cf 120000
--- a/playbooks/common/openshift-cfme/roles
+++ b/playbooks/byo/openshift-management/roles
diff --git a/playbooks/byo/openshift-management/uninstall.yml b/playbooks/byo/openshift-management/uninstall.yml
new file mode 100644
index 000000000..e95c1c88a
--- /dev/null
+++ b/playbooks/byo/openshift-management/uninstall.yml
@@ -0,0 +1,2 @@
+---
+- include: ../../common/openshift-management/uninstall.yml
diff --git a/playbooks/byo/openshift-master/certificates.yml b/playbooks/byo/openshift-master/certificates.yml
index 26b964034..e147dcba1 100644
--- a/playbooks/byo/openshift-master/certificates.yml
+++ b/playbooks/byo/openshift-master/certificates.yml
@@ -3,6 +3,4 @@
- include: ../../common/openshift-cluster/std_include.yml
-- include: ../../common/openshift-master/ca.yml
-
- include: ../../common/openshift-master/certificates.yml
diff --git a/playbooks/byo/openshift-node/scaleup.yml b/playbooks/byo/openshift-node/scaleup.yml
index 9f992cca6..e0c36fb69 100644
--- a/playbooks/byo/openshift-node/scaleup.yml
+++ b/playbooks/byo/openshift-node/scaleup.yml
@@ -16,6 +16,4 @@
- include: ../../common/openshift-cluster/std_include.yml
-- include: ../../common/openshift-node/certificates.yml
-
- include: ../../common/openshift-node/config.yml
diff --git a/playbooks/common/openshift-cfme/config.yml b/playbooks/common/openshift-cfme/config.yml
deleted file mode 100644
index 533a35d9e..000000000
--- a/playbooks/common/openshift-cfme/config.yml
+++ /dev/null
@@ -1,44 +0,0 @@
----
-# TODO: Make this work. The 'name' variable below is undefined
-# presently because it's part of the cfme role. This play can't run
-# until that's re-worked.
-#
-# - name: Pre-Pull manageiq-pods docker images
-# hosts: nodes
-# tasks:
-# - name: Ensure the latest manageiq-pods docker image is pulling
-# docker_image:
-# name: "{{ openshift_cfme_container_image }}"
-# # Fire-and-forget method, never timeout
-# async: 99999999999
-# # F-a-f, never check on this. True 'background' task.
-# poll: 0
-
-- name: Configure Masters for CFME Bulk Image Imports
- hosts: oo_masters_to_config
- serial: 1
- tasks:
- - name: Run master cfme tuning playbook
- include_role:
- name: openshift_cfme
- tasks_from: tune_masters
-
-- name: Setup CFME
- hosts: oo_first_master
- vars:
- r_openshift_cfme_miq_template_content: "{{ lookup('file', 'roles/openshift_cfme/files/miq-template.yaml') | from_yaml}}"
- pre_tasks:
- - name: Create a temporary place to evaluate the PV templates
- command: mktemp -d /tmp/openshift-ansible-XXXXXXX
- register: r_openshift_cfme_mktemp
- changed_when: false
- - name: Ensure the server template was read from disk
- debug:
- msg="{{ r_openshift_cfme_miq_template_content | from_yaml }}"
-
- tasks:
- - name: Run the CFME Setup Role
- include_role:
- name: openshift_cfme
- vars:
- template_dir: "{{ hostvars[groups.masters.0].r_openshift_cfme_mktemp.stdout }}"
diff --git a/playbooks/common/openshift-checks/adhoc.yml b/playbooks/common/openshift-checks/adhoc.yml
index dfcef8435..d0deaeb65 100644
--- a/playbooks/common/openshift-checks/adhoc.yml
+++ b/playbooks/common/openshift-checks/adhoc.yml
@@ -1,12 +1,13 @@
---
-- name: OpenShift health checks
+- name: OpenShift Health Checks
hosts: oo_all_hosts
+
roles:
- openshift_health_checker
vars:
- r_openshift_health_checker_playbook_context: adhoc
post_tasks:
- - name: Run health checks
+ - name: Run health checks (adhoc)
action: openshift_health_check
args:
checks: '{{ openshift_checks | default([]) }}'
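Note: in the adhoc context the checks to run are chosen on the command line via the openshift_checks variable, e.g. (inventory path hypothetical):

  # ansible-playbook -i hosts playbooks/byo/openshift-checks/adhoc.yml \
  #     -e openshift_checks=disk_availability,memory_availability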
diff --git a/playbooks/common/openshift-checks/health.yml b/playbooks/common/openshift-checks/health.yml
index 21ea785ef..d0921b9d3 100644
--- a/playbooks/common/openshift-checks/health.yml
+++ b/playbooks/common/openshift-checks/health.yml
@@ -1,11 +1,13 @@
---
-- name: Run OpenShift health checks
+- name: OpenShift Health Checks
hosts: oo_all_hosts
+
roles:
- openshift_health_checker
vars:
- r_openshift_health_checker_playbook_context: health
post_tasks:
- - action: openshift_health_check
+ - name: Run health checks (@health)
+ action: openshift_health_check
args:
checks: ['@health']
diff --git a/playbooks/common/openshift-checks/install.yml b/playbooks/common/openshift-checks/install.yml
new file mode 100644
index 000000000..6701a2e15
--- /dev/null
+++ b/playbooks/common/openshift-checks/install.yml
@@ -0,0 +1,47 @@
+---
+- name: Health Check Checkpoint Start
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Health Check 'In Progress'
+ set_stats:
+ data:
+ installer_phase_health: "In Progress"
+ aggregate: false
+
+- name: OpenShift Health Checks
+ hosts: oo_all_hosts
+ any_errors_fatal: true
+ roles:
+ - openshift_health_checker
+ vars:
+ - r_openshift_health_checker_playbook_context: install
+ post_tasks:
+ - name: Run health checks (install) - EL
+ when: ansible_distribution != "Fedora"
+ action: openshift_health_check
+ args:
+ checks:
+ - disk_availability
+ - memory_availability
+ - package_availability
+ - package_version
+ - docker_image_availability
+ - docker_storage
+
+ - name: Run health checks (install) - Fedora
+ when: ansible_distribution == "Fedora"
+ action: openshift_health_check
+ args:
+ checks:
+ - docker_image_availability
+
+- name: Health Check Checkpoint End
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Health Check 'Complete'
+ set_stats:
+ data:
+ installer_phase_health: "Complete"
+ aggregate: false
diff --git a/playbooks/common/openshift-checks/pre-install.yml b/playbooks/common/openshift-checks/pre-install.yml
index 88e6f9120..32449d4e4 100644
--- a/playbooks/common/openshift-checks/pre-install.yml
+++ b/playbooks/common/openshift-checks/pre-install.yml
@@ -1,11 +1,13 @@
---
-- name: run OpenShift pre-install checks
+- name: OpenShift Health Checks
hosts: oo_all_hosts
+
roles:
- openshift_health_checker
vars:
- r_openshift_health_checker_playbook_context: pre-install
post_tasks:
- - action: openshift_health_check
+ - name: Run health checks (@preflight)
+ action: openshift_health_check
args:
checks: ['@preflight']
diff --git a/playbooks/common/openshift-cluster/config.yml b/playbooks/common/openshift-cluster/config.yml
index 4ca0d48e4..244787985 100644
--- a/playbooks/common/openshift-cluster/config.yml
+++ b/playbooks/common/openshift-cluster/config.yml
@@ -1,26 +1,5 @@
---
-# TODO: refactor this into its own include
-# and pass a variable for ctx
-- name: Verify Requirements
- hosts: oo_all_hosts
- roles:
- - openshift_health_checker
- vars:
- - r_openshift_health_checker_playbook_context: install
- post_tasks:
- - action: openshift_health_check
- args:
- checks:
- - disk_availability
- - memory_availability
- - package_availability
- - package_version
- - docker_image_availability
- - docker_storage
-
-- include: ../openshift-etcd/ca.yml
-
-- include: ../openshift-etcd/certificates.yml
+- include: ../openshift-checks/install.yml
- include: ../openshift-etcd/config.yml
@@ -30,16 +9,10 @@
- include: ../openshift-loadbalancer/config.yml
when: groups.oo_lb_to_config | default([]) | count > 0
-- include: ../openshift-master/ca.yml
-
-- include: ../openshift-master/certificates.yml
-
- include: ../openshift-master/config.yml
- include: ../openshift-master/additional_config.yml
-- include: ../openshift-node/certificates.yml
-
- include: ../openshift-node/config.yml
- include: ../openshift-glusterfs/config.yml
@@ -56,6 +29,9 @@
- include: service_catalog.yml
when: openshift_enable_service_catalog | default(false) | bool
+- include: ../openshift-management/config.yml
+ when: openshift_management_install_management | default(false) | bool
+
- name: Print deprecated variable warning message if necessary
hosts: oo_first_master
gather_facts: no
diff --git a/playbooks/common/openshift-cluster/create_persistent_volumes.yml b/playbooks/common/openshift-cluster/create_persistent_volumes.yml
index 8a60a30b8..ec6f2c52c 100644
--- a/playbooks/common/openshift-cluster/create_persistent_volumes.yml
+++ b/playbooks/common/openshift-cluster/create_persistent_volumes.yml
@@ -1,4 +1,13 @@
---
+- name: Create persistent volumes
+ hosts: oo_first_master
+ vars:
+ persistent_volumes: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volumes(groups) }}"
+ persistent_volume_claims: "{{ hostvars[groups.oo_first_master.0] | oo_persistent_volume_claims }}"
+ tasks:
+ - debug: var=persistent_volumes
+ - debug: var=persistent_volume_claims
+
- name: Create Hosted Resources - persistent volumes
hosts: oo_first_master
vars:
diff --git a/playbooks/common/openshift-cluster/evaluate_groups.yml b/playbooks/common/openshift-cluster/evaluate_groups.yml
index e55b2f964..78b552279 100644
--- a/playbooks/common/openshift-cluster/evaluate_groups.yml
+++ b/playbooks/common/openshift-cluster/evaluate_groups.yml
@@ -51,7 +51,7 @@
when:
- g_etcd_hosts | default([]) | length not in [3,1]
- not openshift_master_unsupported_embedded_etcd | default(False)
- - not openshift_node_bootstrap | default(False)
+ - not (openshift_node_bootstrap | default(False))
- name: Evaluate oo_all_hosts
add_host:
diff --git a/playbooks/common/openshift-cluster/initialize_facts.yml b/playbooks/common/openshift-cluster/initialize_facts.yml
index be2f8b5f4..91223d368 100644
--- a/playbooks/common/openshift-cluster/initialize_facts.yml
+++ b/playbooks/common/openshift-cluster/initialize_facts.yml
@@ -10,6 +10,7 @@
- name: load openshift_facts module
include_role:
name: openshift_facts
+ static: yes
# TODO: Should this role be refactored into health_checks??
- name: Run openshift_sanitize_inventory to set variables
@@ -145,7 +146,19 @@
https_proxy: "{{ openshift_https_proxy | default(None) }}"
no_proxy: "{{ openshift_no_proxy | default(None) }}"
generate_no_proxy_hosts: "{{ openshift_generate_no_proxy_hosts | default(True) }}"
- no_proxy_internal_hostnames: "{{ openshift_no_proxy_internal_hostnames | default(None) }}"
+
+ - name: Set fact of no_proxy_internal_hostnames
+ openshift_facts:
+ role: common
+ local_facts:
+ no_proxy_internal_hostnames: "{{ hostvars | oo_select_keys(groups['oo_nodes_to_config']
+ | union(groups['oo_masters_to_config'])
+ | union(groups['oo_etcd_to_config'] | default([])))
+ | oo_collect('openshift.common.hostname') | default([]) | join (',')
+ }}"
+ when:
+ - openshift_http_proxy is defined or openshift_https_proxy is defined
+ - openshift_generate_no_proxy_hosts | default(True) | bool
- name: initialize_facts set_fact repoquery command
set_fact:
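Note: the new task collects openshift.common.hostname from every node, master, and etcd host and joins the results with commas. For example, with a master m1.example.com and a node n1.example.com in the inventory (hypothetical hosts), the resulting fact would be:

  no_proxy_internal_hostnames: "m1.example.com,n1.example.com"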
diff --git a/playbooks/common/openshift-cluster/initialize_openshift_version.yml b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
index 6100c36e1..37a5284d5 100644
--- a/playbooks/common/openshift-cluster/initialize_openshift_version.yml
+++ b/playbooks/common/openshift-cluster/initialize_openshift_version.yml
@@ -1,15 +1,4 @@
---
-# openshift_install_base_package_group may be set in a play variable to limit
-# the host groups the base package is installed on. This is currently used
-# for master/control-plane upgrades.
-- name: Set version_install_base_package true on masters and nodes
- hosts: "{{ openshift_install_base_package_group | default('oo_masters_to_config:oo_nodes_to_config') }}"
- tasks:
- - name: Set version_install_base_package true
- set_fact:
- version_install_base_package: True
- when: version_install_base_package is not defined
-
# NOTE: requires openshift_facts be run
- name: Determine openshift_version to configure on first master
hosts: oo_first_master
@@ -19,8 +8,8 @@
# NOTE: We set this even on etcd hosts as they may also later run as masters,
# and we don't want to install wrong version of docker and have to downgrade
# later.
-- name: Set openshift_version for all hosts
- hosts: oo_all_hosts:!oo_first_master
+- name: Set openshift_version for etcd, node, and master hosts
+ hosts: oo_etcd_to_config:oo_nodes_to_config:oo_masters_to_config:!oo_first_master
vars:
openshift_version: "{{ hostvars[groups.oo_first_master.0].openshift_version }}"
pre_tasks:
diff --git a/playbooks/common/openshift-cluster/openshift_default_storage_class.yml b/playbooks/common/openshift-cluster/openshift_default_storage_class.yml
index 4b4f19690..62fe0dd60 100644
--- a/playbooks/common/openshift-cluster/openshift_default_storage_class.yml
+++ b/playbooks/common/openshift-cluster/openshift_default_storage_class.yml
@@ -3,4 +3,4 @@
hosts: oo_first_master
roles:
- role: openshift_default_storage_class
- when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce')
+ when: openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce' or openshift_cloudprovider_kind == 'openstack')
diff --git a/playbooks/common/openshift-cluster/openshift_hosted.yml b/playbooks/common/openshift-cluster/openshift_hosted.yml
index 32e5e708a..c1536eb36 100644
--- a/playbooks/common/openshift-cluster/openshift_hosted.yml
+++ b/playbooks/common/openshift-cluster/openshift_hosted.yml
@@ -1,7 +1,6 @@
---
- name: Hosted Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Hosted install 'In Progress'
@@ -26,8 +25,7 @@
when: openshift_hosted_prometheus_deploy | default(False) | bool
- name: Hosted Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Hosted install 'Complete'
diff --git a/playbooks/common/openshift-cluster/openshift_logging.yml b/playbooks/common/openshift-cluster/openshift_logging.yml
index 69f50fbcd..529a4c939 100644
--- a/playbooks/common/openshift-cluster/openshift_logging.yml
+++ b/playbooks/common/openshift-cluster/openshift_logging.yml
@@ -1,7 +1,6 @@
---
- name: Logging Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Logging install 'In Progress'
@@ -24,8 +23,7 @@
tasks_from: update_master_config
- name: Logging Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Logging install 'Complete'
diff --git a/playbooks/common/openshift-cluster/openshift_metrics.yml b/playbooks/common/openshift-cluster/openshift_metrics.yml
index e369dcd86..9c0bd489b 100644
--- a/playbooks/common/openshift-cluster/openshift_metrics.yml
+++ b/playbooks/common/openshift-cluster/openshift_metrics.yml
@@ -1,7 +1,6 @@
---
- name: Metrics Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Metrics install 'In Progress'
@@ -25,8 +24,7 @@
tasks_from: update_master_config.yaml
- name: Metrics Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Metrics install 'Complete'
diff --git a/playbooks/common/openshift-cluster/openshift_prometheus.yml b/playbooks/common/openshift-cluster/openshift_prometheus.yml
index ac2d250a3..a73b294a5 100644
--- a/playbooks/common/openshift-cluster/openshift_prometheus.yml
+++ b/playbooks/common/openshift-cluster/openshift_prometheus.yml
@@ -1,5 +1,25 @@
---
+- name: Prometheus Install Checkpoint Start
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Prometheus install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_prometheus: "In Progress"
+ aggregate: false
+
- name: Create Hosted Resources - openshift_prometheus
hosts: oo_first_master
roles:
- role: openshift_prometheus
+
+- name: Prometheus Install Checkpoint End
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Prometheus install 'Complete'
+ set_stats:
+ data:
+ installer_phase_prometheus: "Complete"
+ aggregate: false
diff --git a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
index 12cd209d2..2068ed199 100644
--- a/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
+++ b/playbooks/common/openshift-cluster/redeploy-certificates/openshift-ca.yml
@@ -44,8 +44,8 @@
- modify_yaml:
dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
yaml_key: servingInfo.clientCA
- yaml_value: ca-bundle.crt
- when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'ca-bundle.crt'
+ yaml_value: ca.crt
+ when: (g_master_config_output.content|b64decode|from_yaml).servingInfo.clientCA != 'ca.crt'
- modify_yaml:
dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
yaml_key: etcdClientInfo.ca
@@ -114,12 +114,18 @@
register: g_new_openshift_ca_mktemp
changed_when: false
-- include: ../../openshift-master/ca.yml
+- name: Create OpenShift CA
+ hosts: oo_first_master
vars:
# Set openshift_ca_config_dir to a temporary directory where CA
# will be created. We'll replace the existing CA with the CA
# created in the temporary directory.
openshift_ca_config_dir: "{{ hostvars[groups.oo_first_master.0].g_new_openshift_ca_mktemp.stdout }}"
+ roles:
+ - role: openshift_master_facts
+ - role: openshift_named_certificates
+ - role: openshift_ca
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- name: Create temp directory for syncing certs
hosts: localhost
diff --git a/playbooks/common/openshift-cluster/service_catalog.yml b/playbooks/common/openshift-cluster/service_catalog.yml
index 95a8f601c..bd964b2ce 100644
--- a/playbooks/common/openshift-cluster/service_catalog.yml
+++ b/playbooks/common/openshift-cluster/service_catalog.yml
@@ -1,7 +1,6 @@
---
- name: Service Catalog Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Service Catalog install 'In Progress'
@@ -20,8 +19,7 @@
first_master: "{{ groups.oo_first_master[0] }}"
- name: Service Catalog Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Service Catalog install 'Complete'
diff --git a/playbooks/common/openshift-cluster/std_include.yml b/playbooks/common/openshift-cluster/std_include.yml
index 090ad6445..45b34c8bd 100644
--- a/playbooks/common/openshift-cluster/std_include.yml
+++ b/playbooks/common/openshift-cluster/std_include.yml
@@ -1,7 +1,6 @@
---
- name: Initialization Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
roles:
- installer_checkpoint
@@ -37,8 +36,7 @@
- always
- name: Initialization Checkpoint End
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set install initialization 'Complete'
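Note: these Start/End plays feed the installer_checkpoint callback through set_stats; at the end of a run the callback prints a per-phase summary roughly like the sketch below (phase names and formatting approximate):

  INSTALLER STATUS
  Initialization   : Complete
  Health Check     : Complete
  etcd Install     : Complete
  Master Install   : In Progress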
diff --git a/playbooks/common/openshift-cluster/upgrades/files/shared_resource_viewer_role.yaml b/playbooks/common/openshift-cluster/upgrades/files/shared_resource_viewer_role.yaml
new file mode 100644
index 000000000..9c9c260fb
--- /dev/null
+++ b/playbooks/common/openshift-cluster/upgrades/files/shared_resource_viewer_role.yaml
@@ -0,0 +1,37 @@
+---
+apiVersion: v1
+kind: Role
+metadata:
+ name: shared-resource-viewer
+ namespace: openshift
+rules:
+- apiGroups:
+ - ""
+ - template.openshift.io
+ attributeRestrictions: null
+ resources:
+ - templates
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ - image.openshift.io
+ attributeRestrictions: null
+ resources:
+ - imagestreamimages
+ - imagestreams
+ - imagestreamtags
+ verbs:
+ - get
+ - list
+ - watch
+- apiGroups:
+ - ""
+ - image.openshift.io
+ attributeRestrictions: null
+ resources:
+ - imagestreams/layers
+ verbs:
+ - get
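Note: the control-plane upgrade playbook later in this diff applies this file with the oc_obj module; a manual equivalent would be roughly (sketch):

  # oc replace -n openshift -f shared_resource_viewer_role.yaml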
diff --git a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
index 72de63070..fc1cbf32a 100644
--- a/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/initialize_nodes_to_upgrade.yml
@@ -30,6 +30,7 @@
ansible_become: "{{ g_sudo | default(omit) }}"
with_items: " {{ groups['oo_nodes_to_config'] }}"
when:
+ - hostvars[item].openshift is defined
- hostvars[item].openshift.common.hostname in nodes_to_upgrade.results.results[0]['items'] | map(attribute='metadata.name') | list
changed_when: false
diff --git a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
index 07e521a89..122066955 100644
--- a/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/post_control_plane.yml
@@ -103,9 +103,16 @@
openshift_hosted_templates_import_command: replace
# Check for warnings to be printed at the end of the upgrade:
-- name: Check for warnings
+- name: Clean up and display warnings
hosts: oo_masters_to_config
- tasks:
+ tags:
+ - always
+ gather_facts: no
+ roles:
+ - role: openshift_excluder
+ r_openshift_excluder_action: enable
+ r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+ post_tasks:
# Check if any masters are using pluginOrderOverride and warn if so, only for 1.3/3.3 and beyond:
- name: grep pluginOrderOverride
command: grep pluginOrderOverride {{ openshift.common.config_base }}/master/master-config.yaml
@@ -121,12 +128,8 @@
- not grep_plugin_order_override | skipped
- grep_plugin_order_override.rc == 0
-- name: Re-enable excluder if it was previously enabled
- hosts: oo_masters_to_config
- tags:
- - always
- gather_facts: no
- roles:
- - role: openshift_excluder
- r_openshift_excluder_action: enable
- r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
+ - name: Warn if shared-resource-viewer could not be updated
+ debug:
+ msg: "WARNING the shared-resource-viewer role could not be upgraded to 3.6 spec because it's marked protected, please see https://bugzilla.redhat.com/show_bug.cgi?id=1493213"
+ when:
+ - __shared_resource_viewer_protected | default(false)
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
index 45022cd61..6a5bc24f7 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_control_plane_running.yml
@@ -9,16 +9,29 @@
local_facts:
ha: "{{ groups.oo_masters_to_config | length > 1 }}"
- - name: Ensure HA Master is running
- service:
- name: "{{ openshift.common.service_type }}-master-api"
- state: started
- enabled: yes
- when: openshift.common.is_containerized | bool
+ - when: openshift.common.is_containerized | bool
+ block:
+ - set_fact:
+ master_services:
+ - "{{ openshift.common.service_type }}-master"
- - name: Ensure HA Master is running
- service:
- name: "{{ openshift.common.service_type }}-master-controllers"
- state: started
- enabled: yes
- when: openshift.common.is_containerized | bool
+ # In case of a non-HA to HA upgrade.
+ - name: Check if the {{ openshift.common.service_type }}-master-api.service exists
+ command: >
+ systemctl list-units {{ openshift.common.service_type }}-master-api.service --no-legend
+ register: master_api_service_status
+
+ - set_fact:
+ master_services:
+ - "{{ openshift.common.service_type }}-master-api"
+ - "{{ openshift.common.service_type }}-master-controllers"
+ when:
+ - master_api_service_status.stdout_lines | length > 0
+ - (openshift.common.service_type + '-master-api.service') in master_api_service_status.stdout_lines[0]
+
+ - name: Ensure Master is running
+ service:
+ name: "{{ item }}"
+ state: started
+ enabled: yes
+ with_items: "{{ master_services }}"
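Note: the block above defaults to the single combined master service and switches to the api/controllers pair only when systemctl reports the -master-api unit, i.e. when stdout contains a line resembling (hypothetical, 'origin' service type assumed):

  # origin-master-api.service loaded active running Origin Master API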
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
index ad6325ca0..2a8de50a2 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_health_checks.yml
@@ -1,12 +1,14 @@
---
-- name: Verify Host Requirements
+- name: OpenShift Health Checks
hosts: oo_all_hosts
+ any_errors_fatal: true
roles:
- openshift_health_checker
vars:
- r_openshift_health_checker_playbook_context: upgrade
post_tasks:
- - action: openshift_health_check
+ - name: Run health checks (upgrade)
+ action: openshift_health_check
args:
checks:
- disk_availability
diff --git a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
index 142ce5f3d..13fa37b09 100644
--- a/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
+++ b/playbooks/common/openshift-cluster/upgrades/pre/verify_upgrade_targets.yml
@@ -4,6 +4,12 @@
msg: Verify OpenShift is already installed
when: openshift.common.version is not defined
+- name: Update oreg_auth docker login credentials if necessary
+ include_role:
+ name: docker
+ tasks_from: registry_auth.yml
+ when: oreg_auth_user is defined
+
- name: Verify containers are available for upgrade
command: >
docker pull {{ openshift.common.cli_image }}:{{ openshift_image_tag }}
@@ -37,7 +43,7 @@
fail:
msg: "OpenShift {{ avail_openshift_version }} is available, but {{ openshift_upgrade_target }} or greater is required"
when:
- - openshift_pkg_version | default('0.0', True) | version_compare(openshift_release, '<')
+ - (openshift_pkg_version | default('-0.0', True)).split('-')[1] | version_compare(openshift_release, '<')
- name: Fail when openshift version does not meet minimum requirement for Origin upgrade
fail:
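Note: the reworked condition strips the leading dash from an rpm-style openshift_pkg_version before comparing it to openshift_release. For example, with openshift_pkg_version '-3.7.0-0.126.0' (hypothetical):

  # ('-3.7.0-0.126.0').split('-')[1]        -> '3.7.0'
  # '3.7.0' | version_compare('3.7', '<')   -> False (no failure)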
diff --git a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
index da47491c1..a5e2f7940 100644
--- a/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/upgrade_control_plane.yml
@@ -31,7 +31,6 @@
role: master
local_facts:
embedded_etcd: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
- debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level | default(2)) }}"
- name: Upgrade and backup etcd
include: ./etcd/main.yml
@@ -91,6 +90,9 @@
- include_vars: ../../../../roles/openshift_master/vars/main.yml
+ - name: Update journald config
+ include: ../../../../roles/openshift_master/tasks/journald.yml
+
- name: Remove any legacy systemd units and update systemd units
include: ../../../../roles/openshift_master/tasks/systemd_units.yml
@@ -193,13 +195,14 @@
# Another spot where we assume docker is running and do not want to accidentally trigger an unsafe
# restart.
skip_docker_role: True
+ __master_shared_resource_viewer_file: "shared_resource_viewer_role.yaml"
tasks:
- name: Reconcile Cluster Roles
command: >
{{ openshift.common.client_binary }} adm --config={{ openshift.common.config_base }}/master/admin.kubeconfig
policy reconcile-cluster-roles --additive-only=true --confirm -o name
register: reconcile_cluster_role_result
- when: not openshift.common.version_gte_3_7 | bool
+ when: openshift_version | version_compare('3.7','<')
changed_when:
- reconcile_cluster_role_result.stdout != ''
- reconcile_cluster_role_result.rc == 0
@@ -214,7 +217,7 @@
--exclude-groups=system:unauthenticated
--exclude-users=system:anonymous
--additive-only=true --confirm -o name
- when: not openshift.common.version_gte_3_7 | bool
+ when: openshift_version | version_compare('3.7','<')
register: reconcile_bindings_result
changed_when:
- reconcile_bindings_result.stdout != ''
@@ -229,7 +232,52 @@
changed_when:
- reconcile_jenkins_role_binding_result.stdout != ''
- reconcile_jenkins_role_binding_result.rc == 0
- when: (not openshift.common.version_gte_3_7 | bool) and (openshift.common.version_gte_3_4_or_1_4 | bool)
+ when:
+ - openshift_version | version_compare('3.7','<')
+ - openshift_version | version_compare('3.4','>=')
+
+ - when: openshift_upgrade_target | version_compare('3.7','<')
+ block:
+ - name: Retrieve shared-resource-viewer
+ oc_obj:
+ state: list
+ kind: role
+ name: "shared-resource-viewer"
+ namespace: "openshift"
+ register: objout
+
+ - name: Determine if shared-resource-viewer is protected
+ set_fact:
+ __shared_resource_viewer_protected: true
+ when:
+ - "'results' in objout"
+ - "'results' in objout['results']"
+ - "'annotations' in objout['results']['results'][0]['metadata']"
+ - "'openshift.io/reconcile-protect' in objout['results']['results'][0]['metadata']['annotations']"
+ - "objout['results']['results'][0]['metadata']['annotations']['openshift.io/reconcile-protect'] == 'true'"
+ - copy:
+ src: "{{ item }}"
+ dest: "/tmp/{{ item }}"
+ with_items:
+ - "{{ __master_shared_resource_viewer_file }}"
+ when: __shared_resource_viewer_protected is not defined
+
+ - name: Fixup shared-resource-viewer role
+ oc_obj:
+ state: present
+ kind: role
+ name: "shared-resource-viewer"
+ namespace: "openshift"
+ files:
+ - "/tmp/{{ __master_shared_resource_viewer_file }}"
+ delete_after: true
+ when: __shared_resource_viewer_protected is not defined
+ register: result
+ retries: 3
+ delay: 5
+ until: result.rc == 0
+ ignore_errors: true
+
- name: Reconcile Security Context Constraints
command: >
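Note: to see why the fixup was skipped on a given cluster, the annotation this block tests for can be inspected directly (sketch):

  # oc get role shared-resource-viewer -n openshift -o yaml
  # ...then look for metadata.annotations['openshift.io/reconcile-protect']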
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
index d69472fad..5e7a66171 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/master_config_upgrade.yml
@@ -41,12 +41,12 @@
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.servicesServingCert.signer.certFile'
+ yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
yaml_value: service-signer.crt
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'controllerConfig.servicesServingCert.signer.keyFile'
+ yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
yaml_value: service-signer.key
- modify_yaml:
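Note: after these modify_yaml tasks run, the relevant portion of master-config.yaml would read (sketch):

  controllerConfig:
    serviceServingCert:
      signer:
        certFile: service-signer.crt
        keyFile: service-signer.key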
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
index f64f0e003..54c85f0fb 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_3/upgrade_control_plane.yml
@@ -68,7 +68,6 @@
# defined, and overriding the normal behavior of protecting the installed version
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
- openshift_install_base_package_group: "oo_masters_to_config"
# We skip the docker role at this point in upgrade to prevent
# unintended package, container, or config upgrades which trigger
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
index ed89dbe8d..52458e03c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/master_config_upgrade.yml
@@ -1,16 +1,10 @@
---
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginConfig'
- yaml_value: "{{ openshift.master.admission_plugin_config }}"
- when: "'admission_plugin_config' in openshift.master"
+ yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
+ yaml_value: service-signer.crt
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginOrderOverride'
- yaml_value:
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'kubernetesMasterConfig.admissionConfig'
- yaml_value:
+ yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
+ yaml_value: service-signer.key
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
index 43da5b629..d7cb38d03 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_4/upgrade_control_plane.yml
@@ -68,7 +68,6 @@
# defined, and overriding the normal behavior of protecting the installed version
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
- openshift_install_base_package_group: "oo_masters_to_config"
# We skip the docker role at this point in upgrade to prevent
# unintended package, container, or config upgrades which trigger
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml
index ed89dbe8d..52458e03c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/master_config_upgrade.yml
@@ -1,16 +1,10 @@
---
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginConfig'
- yaml_value: "{{ openshift.master.admission_plugin_config }}"
- when: "'admission_plugin_config' in openshift.master"
+ yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
+ yaml_value: service-signer.crt
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginOrderOverride'
- yaml_value:
-
-- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'kubernetesMasterConfig.admissionConfig'
- yaml_value:
+ yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
+ yaml_value: service-signer.key
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
index 30e719d8f..bda245fe1 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade.yml
@@ -112,6 +112,8 @@
- include: ../cleanup_unused_images.yml
- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_5/master_config_upgrade.yml"
- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
index e9cec9220..6cdea7b84 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_5/upgrade_control_plane.yml
@@ -72,7 +72,6 @@
# defined, and overriding the normal behavior of protecting the installed version
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
- openshift_install_base_package_group: "oo_masters_to_config"
# We skip the docker role at this point in upgrade to prevent
# unintended package, container, or config upgrades which trigger
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml
index ed89dbe8d..db0c8f886 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/master_config_upgrade.yml
@@ -1,16 +1,15 @@
---
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginConfig'
- yaml_value: "{{ openshift.master.admission_plugin_config }}"
- when: "'admission_plugin_config' in openshift.master"
+ yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
+ yaml_value: service-signer.crt
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginOrderOverride'
- yaml_value:
+ yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
+ yaml_value: service-signer.key
- modify_yaml:
- dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'kubernetesMasterConfig.admissionConfig'
- yaml_value:
+ dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ yaml_key: servingInfo.clientCA
+ yaml_value: ca.crt
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
index 920dc2ffc..dd109cfa9 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade.yml
@@ -116,6 +116,8 @@
- include: ../cleanup_unused_images.yml
- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_6/master_config_upgrade.yml"
- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
index 27d8515dc..8ab68002d 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_6/upgrade_control_plane.yml
@@ -76,7 +76,6 @@
# defined, and overriding the normal behavior of protecting the installed version
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
- openshift_install_base_package_group: "oo_masters_to_config"
# We skip the docker role at this point in upgrade to prevent
# unintended package, container, or config upgrades which trigger
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml
index ed89dbe8d..1d4d1919c 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/master_config_upgrade.yml
@@ -1,16 +1,20 @@
---
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginConfig'
- yaml_value: "{{ openshift.master.admission_plugin_config }}"
- when: "'admission_plugin_config' in openshift.master"
+ yaml_key: 'controllerConfig.election.lockName'
+ yaml_value: 'openshift-master-controllers'
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'admissionConfig.pluginOrderOverride'
- yaml_value:
+ yaml_key: 'controllerConfig.serviceServingCert.signer.certFile'
+ yaml_value: service-signer.crt
- modify_yaml:
dest: "{{ openshift.common.config_base}}/master/master-config.yaml"
- yaml_key: 'kubernetesMasterConfig.admissionConfig'
- yaml_value:
+ yaml_key: 'controllerConfig.serviceServingCert.signer.keyFile'
+ yaml_value: service-signer.key
+
+- modify_yaml:
+ dest: "{{ openshift.common.config_base }}/master/master-config.yaml"
+ yaml_key: servingInfo.clientCA
+ yaml_value: ca.crt
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
index f1ca1edb9..f4862e321 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade.yml
@@ -120,6 +120,22 @@
- include: ../cleanup_unused_images.yml
- include: ../upgrade_control_plane.yml
+ vars:
+ master_config_hook: "v3_7/master_config_upgrade.yml"
+
+# All controllers must be stopped at the same time and then restarted
+- name: Cycle all controller services to force new leader election mode
+ hosts: oo_masters_to_config
+ gather_facts: no
+ tasks:
+ - name: Stop {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: stopped
+ - name: Start {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: started
- include: ../upgrade_nodes.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
index 6c4f9671b..d5a8379d7 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/upgrade_control_plane.yml
@@ -80,7 +80,6 @@
# defined, and overriding the normal behavior of protecting the installed version
openshift_release: "{{ openshift_upgrade_target }}"
openshift_protect_installed_version: False
- openshift_install_base_package_group: "oo_masters_to_config"
# We skip the docker role at this point in upgrade to prevent
# unintended package, container, or config upgrades which trigger
@@ -128,4 +127,18 @@
vars:
master_config_hook: "v3_7/master_config_upgrade.yml"
+# All controllers must be stopped at the same time and then restarted
+- name: Cycle all controller services to force new leader election mode
+ hosts: oo_etcd_to_config
+ gather_facts: no
+ tasks:
+ - name: Stop {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: stopped
+ - name: Start {{ openshift.common.service_type }}-master-controllers
+ systemd:
+ name: "{{ openshift.common.service_type }}-master-controllers"
+ state: started
+
- include: ../post_control_plane.yml
diff --git a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
index f76fc68d1..8e4f99c91 100644
--- a/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
+++ b/playbooks/common/openshift-cluster/upgrades/v3_7/validator.yml
@@ -15,7 +15,7 @@
- name: Confirm OpenShift authorization objects are in sync
command: >
{{ openshift.common.client_binary }} adm migrate authorization
- when: not openshift.common.version_gte_3_7 | bool
+ when: openshift_version | version_compare('3.7','<')
changed_when: false
register: l_oc_result
until: l_oc_result.rc == 0
diff --git a/playbooks/common/openshift-etcd/certificates.yml b/playbooks/common/openshift-etcd/certificates.yml
index 31a0f50d8..eb6b94f33 100644
--- a/playbooks/common/openshift-etcd/certificates.yml
+++ b/playbooks/common/openshift-etcd/certificates.yml
@@ -1,29 +1,4 @@
---
-- name: Create etcd server certificates for etcd hosts
- hosts: oo_etcd_to_config
- any_errors_fatal: true
- roles:
- - role: openshift_etcd_facts
- post_tasks:
- - include_role:
- name: etcd
- tasks_from: server_certificates
- vars:
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
- etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+- include: server_certificates.yml
-- name: Create etcd client certificates for master hosts
- hosts: oo_masters_to_config
- any_errors_fatal: true
- roles:
- - role: openshift_etcd_facts
- - role: openshift_etcd_client_certificates
- etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
- etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
- etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
- etcd_cert_prefix: "master.etcd-"
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
- r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
+- include: master_etcd_certificates.yml
diff --git a/playbooks/common/openshift-etcd/config.yml b/playbooks/common/openshift-etcd/config.yml
index 2cae231b4..48d46bbb0 100644
--- a/playbooks/common/openshift-etcd/config.yml
+++ b/playbooks/common/openshift-etcd/config.yml
@@ -1,7 +1,6 @@
---
- name: etcd Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set etcd install 'In Progress'
@@ -10,6 +9,10 @@
installer_phase_etcd: "In Progress"
aggregate: false
+- include: ca.yml
+
+- include: certificates.yml
+
- name: Configure etcd
hosts: oo_etcd_to_config
any_errors_fatal: true
@@ -23,8 +26,7 @@
- role: nickhammond.logrotate
- name: etcd Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set etcd install 'Complete'
diff --git a/playbooks/common/openshift-etcd/embedded2external.yml b/playbooks/common/openshift-etcd/embedded2external.yml
new file mode 100644
index 000000000..b16b78c4f
--- /dev/null
+++ b/playbooks/common/openshift-etcd/embedded2external.yml
@@ -0,0 +1,172 @@
+---
+- name: Pre-migrate checks
+ hosts: localhost
+ tasks:
+ # Check there is only one etcd host
+ - assert:
+ that: groups.oo_etcd_to_config | default([]) | length == 1
+ msg: "[etcd] group must contain only one host"
+ # Check there is only one master
+ - assert:
+ that: groups.oo_masters_to_config | default([]) | length == 1
+ msg: "[master] group must contain only one host"
+
+# 1. stop a master
+- name: Prepare masters for etcd data migration
+ hosts: oo_first_master
+ roles:
+ - role: openshift_facts
+ tasks:
+ - name: Check the master API is ready
+ include_role:
+ name: openshift_master
+ tasks_from: check_master_api_is_ready
+ - set_fact:
+ master_service: "{{ openshift.common.service_type + '-master' }}"
+ embedded_etcd_backup_suffix: "{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }}"
+ - debug:
+ msg: "master service name: {{ master_service }}"
+ - name: Stop master
+ service:
+ name: "{{ master_service }}"
+ state: stopped
+ # 2. backup embedded etcd
+ # Can't use with_items with include_role: https://github.com/ansible/ansible/issues/21285
+ - include_role:
+ name: etcd
+ tasks_from: backup
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ r_etcd_common_backup_tag: pre-migrate
+ r_etcd_common_embedded_etcd: "{{ true }}"
+ r_etcd_common_backup_sufix_name: "{{ embedded_etcd_backup_suffix }}"
+
+ - include_role:
+ name: etcd
+ tasks_from: backup.archive
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ r_etcd_common_backup_tag: pre-migrate
+ r_etcd_common_embedded_etcd: "{{ true }}"
+ r_etcd_common_backup_sufix_name: "{{ embedded_etcd_backup_suffix }}"
+
+# 3. deploy certificates (for etcd and master)
+- include: ca.yml
+
+- include: server_certificates.yml
+
+- name: Backup etcd client certificates for master host
+ hosts: oo_first_master
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: backup_master_etcd_certificates
+
+- name: Redeploy master etcd certificates
+ include: master_etcd_certificates.yml
+ vars:
+ etcd_certificates_redeploy: "{{ true }}"
+
+# 4. deploy external etcd
+- include: ../openshift-etcd/config.yml
+
+# 5. stop external etcd
+- name: Cleanse etcd
+ hosts: oo_etcd_to_config[0]
+ gather_facts: no
+ pre_tasks:
+ - include_role:
+ name: etcd
+ tasks_from: disable_etcd
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ - include_role:
+ name: etcd
+ tasks_from: clean_data
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+
+# 6. copy the embedded etcd backup to the external host
+# TODO(jchaloup): if the etcd and first master are on the same host, just copy the directory
+- name: Copy embedded etcd backup to the external host
+ hosts: localhost
+ tasks:
+ - name: Create local temp directory for syncing etcd backup
+ local_action: command mktemp -d /tmp/etcd_backup-XXXXXXX
+ register: g_etcd_client_mktemp
+ changed_when: False
+ become: no
+
+ - include_role:
+ name: etcd
+ tasks_from: backup.fetch
+ vars:
+ r_etcd_common_etcd_runtime: "{{ hostvars[groups.oo_first_master.0].openshift.common.etcd_runtime }}"
+ etcd_backup_sync_directory: "{{ g_etcd_client_mktemp.stdout }}"
+ r_etcd_common_backup_tag: pre-migrate
+ r_etcd_common_embedded_etcd: "{{ true }}"
+ r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
+ delegate_to: "{{ groups.oo_first_master[0] }}"
+
+ - include_role:
+ name: etcd
+ tasks_from: backup.copy
+ vars:
+ r_etcd_common_etcd_runtime: "{{ hostvars[groups.oo_etcd_to_config.0].openshift.common.etcd_runtime }}"
+ etcd_backup_sync_directory: "{{ g_etcd_client_mktemp.stdout }}"
+ r_etcd_common_backup_tag: pre-migrate
+ r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
+ delegate_to: "{{ groups.oo_etcd_to_config[0] }}"
+
+ - debug:
+ msg: "etcd_backup_dest_directory: {{ g_etcd_client_mktemp.stdout }}"
+
+ - name: Delete temporary directory
+ local_action: file path="{{ g_etcd_client_mktemp.stdout }}" state=absent
+ changed_when: False
+ become: no
+
+# 7. force new cluster from the backup
+- name: Force new etcd cluster
+ hosts: oo_etcd_to_config[0]
+ tasks:
+ - include_role:
+ name: etcd
+ tasks_from: backup.unarchive
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ r_etcd_common_backup_tag: pre-migrate
+ r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
+
+ - include_role:
+ name: etcd
+ tasks_from: backup.force_new_cluster
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ r_etcd_common_backup_tag: pre-migrate
+ r_etcd_common_backup_sufix_name: "{{ hostvars[groups.oo_first_master.0].embedded_etcd_backup_suffix }}"
+ etcd_peer: "{{ openshift.common.ip }}"
+ etcd_url_scheme: "https"
+ etcd_peer_url_scheme: "https"
+
+# 8. re-configure master to use the external etcd
+- name: Configure master to use external etcd
+ hosts: oo_first_master
+ tasks:
+ - include_role:
+ name: openshift_master
+ tasks_from: configure_external_etcd
+ vars:
+ etcd_peer_url_scheme: "https"
+ etcd_ip: "{{ hostvars[groups.oo_etcd_to_config.0].openshift.common.ip }}"
+ etcd_peer_port: 2379
+
+ # 9. start the master
+ - name: Start master
+ service:
+ name: "{{ master_service }}"
+ state: started
+ register: service_status
+ until: service_status.state is defined and service_status.state == "started"
+ retries: 5
+ delay: 10
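Note: this migration is driven end-to-end by the new byo wrapper added earlier in this diff; a usage sketch (inventory path hypothetical):

  # ansible-playbook -i hosts playbooks/byo/openshift-etcd/embedded2external.yml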
diff --git a/playbooks/common/openshift-etcd/master_etcd_certificates.yml b/playbooks/common/openshift-etcd/master_etcd_certificates.yml
new file mode 100644
index 000000000..0a25aac57
--- /dev/null
+++ b/playbooks/common/openshift-etcd/master_etcd_certificates.yml
@@ -0,0 +1,14 @@
+---
+- name: Create etcd client certificates for master hosts
+ hosts: oo_masters_to_config
+ any_errors_fatal: true
+ roles:
+ - role: openshift_etcd_facts
+ - role: openshift_etcd_client_certificates
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ etcd_cert_subdir: "openshift-master-{{ openshift.common.hostname }}"
+ etcd_cert_config_dir: "{{ openshift.common.config_base }}/master"
+ etcd_cert_prefix: "master.etcd-"
+ openshift_ca_host: "{{ groups.oo_first_master.0 }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
+ when: groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config
diff --git a/playbooks/common/openshift-etcd/migrate.yml b/playbooks/common/openshift-etcd/migrate.yml
index 2456ad3a8..31362f2f6 100644
--- a/playbooks/common/openshift-etcd/migrate.yml
+++ b/playbooks/common/openshift-etcd/migrate.yml
@@ -1,4 +1,17 @@
---
+- name: Check if the master has embedded etcd
+ hosts: localhost
+ connection: local
+ become: no
+ gather_facts: no
+ tags:
+ - always
+ tasks:
+ - fail:
+ msg: "Migration of an embedded etcd is not supported. Please, migrate the embedded etcd into an external etcd first."
+ when:
+ - groups.oo_etcd_to_config | default([]) | length == 0
+
- name: Run pre-checks
hosts: oo_etcd_to_migrate
tasks:
@@ -60,12 +73,11 @@
hosts: oo_etcd_to_migrate
gather_facts: no
pre_tasks:
- - set_fact:
- l_etcd_service: "{{ 'etcd_container' if openshift.common.is_containerized else 'etcd' }}"
- - name: Disable etcd members
- service:
- name: "{{ l_etcd_service }}"
- state: stopped
+ - include_role:
+ name: etcd
+ tasks_from: disable_etcd
+ vars:
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- name: Migrate data on first etcd
hosts: oo_etcd_to_migrate[0]
diff --git a/playbooks/common/openshift-etcd/scaleup.yml b/playbooks/common/openshift-etcd/scaleup.yml
index b5ba2bbba..20061366c 100644
--- a/playbooks/common/openshift-etcd/scaleup.yml
+++ b/playbooks/common/openshift-etcd/scaleup.yml
@@ -46,7 +46,7 @@
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
etcd_initial_cluster_state: "existing"
- initial_etcd_cluster: "{{ etcd_add_check.stdout_lines[3] | regex_replace('ETCD_INITIAL_CLUSTER=','') | regex_replace('\"','') }}"
+ etcd_initial_cluster: "{{ etcd_add_check.stdout_lines[3] | regex_replace('ETCD_INITIAL_CLUSTER=','') | regex_replace('\"','') }}"
etcd_ca_setup: False
r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
- role: nickhammond.logrotate
@@ -71,7 +71,7 @@
etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
openshift_ca_host: "{{ groups.oo_first_master.0 }}"
openshift_master_etcd_hosts: "{{ hostvars
- | oo_select_keys(groups['oo_etcd_to_config'] | union(groups['oo_new_etcd_to_config']))
+ | oo_select_keys(groups['oo_etcd_to_config'] | union(groups['oo_new_etcd_to_config'] | default([]) ))
| oo_collect('openshift.common.hostname')
| default(none, true) }}"
openshift_master_etcd_port: "{{ (etcd_client_port | default('2379')) if (groups.oo_etcd_to_config is defined and groups.oo_etcd_to_config) else none }}"
diff --git a/playbooks/common/openshift-etcd/server_certificates.yml b/playbooks/common/openshift-etcd/server_certificates.yml
new file mode 100644
index 000000000..10e06747b
--- /dev/null
+++ b/playbooks/common/openshift-etcd/server_certificates.yml
@@ -0,0 +1,15 @@
+---
+- name: Create etcd server certificates for etcd hosts
+ hosts: oo_etcd_to_config
+ any_errors_fatal: true
+ roles:
+ - role: openshift_etcd_facts
+ post_tasks:
+ - include_role:
+ name: etcd
+ tasks_from: server_certificates
+ vars:
+ etcd_ca_host: "{{ groups.oo_etcd_to_config.0 }}"
+ etcd_peers: "{{ groups.oo_etcd_to_config | default([], true) }}"
+ etcd_certificates_etcd_hosts: "{{ groups.oo_etcd_to_config | default([], true) }}"
+ r_etcd_common_etcd_runtime: "{{ openshift.common.etcd_runtime }}"
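This new play assumes the oo_etcd_to_config group and openshift facts are
already populated, so it is meant to be chained after group evaluation rather
than run standalone; a hedged sketch (the relative include path is an
assumption):

    - include: ../openshift-cluster/evaluate_groups.yml
    - include: server_certificates.yml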
diff --git a/playbooks/common/openshift-glusterfs/config.yml b/playbooks/common/openshift-glusterfs/config.yml
index 516618de2..c2ae5f313 100644
--- a/playbooks/common/openshift-glusterfs/config.yml
+++ b/playbooks/common/openshift-glusterfs/config.yml
@@ -1,7 +1,6 @@
---
- name: GlusterFS Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set GlusterFS install 'In Progress'
@@ -18,6 +17,11 @@
tasks_from: firewall.yml
when:
- openshift_storage_glusterfs_is_native | default(True) | bool
+ - include_role:
+ name: openshift_storage_glusterfs
+ tasks_from: kernel_modules.yml
+ when:
+ - openshift_storage_glusterfs_is_native | default(True) | bool
- name: Open firewall ports for GlusterFS registry nodes
hosts: glusterfs_registry
@@ -27,6 +31,11 @@
tasks_from: firewall.yml
when:
- openshift_storage_glusterfs_registry_is_native | default(True) | bool
+ - include_role:
+ name: openshift_storage_glusterfs
+ tasks_from: kernel_modules.yml
+ when:
+ - openshift_storage_glusterfs_registry_is_native | default(True) | bool
- name: Configure GlusterFS
hosts: oo_first_master
@@ -37,8 +46,7 @@
when: groups.oo_glusterfs_to_config | default([]) | count > 0
- name: GlusterFS Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set GlusterFS install 'Complete'
diff --git a/playbooks/common/openshift-loadbalancer/config.yml b/playbooks/common/openshift-loadbalancer/config.yml
index ecbb092bc..2a703cb61 100644
--- a/playbooks/common/openshift-loadbalancer/config.yml
+++ b/playbooks/common/openshift-loadbalancer/config.yml
@@ -1,7 +1,6 @@
---
- name: Load Balancer Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set load balancer install 'In Progress'
@@ -10,6 +9,15 @@
installer_phase_loadbalancer: "In Progress"
aggregate: false
+- name: Configure firewall and docker for load balancers
+ hosts: oo_lb_to_config:!oo_masters_to_config:!oo_nodes_to_config
+ vars:
+ openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}"
+ roles:
+ - role: os_firewall
+ - role: openshift_docker
+ when: openshift.common.is_containerized | default(False) | bool and not skip_docker_role | default(False) | bool
+
- name: Configure load balancers
hosts: oo_lb_to_config
vars:
@@ -25,12 +33,11 @@
+ openshift_loadbalancer_additional_backends | default([]) }}"
openshift_image_tag: "{{ hostvars[groups.oo_first_master.0].openshift_image_tag }}"
roles:
- - role: os_firewall
- role: openshift_loadbalancer
+ - role: tuned
- name: Load Balancer Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set load balancer install 'Complete'
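The new play above uses an exclusion host pattern so docker and firewall
setup runs only on dedicated load balancers, skipping hosts that double as
masters or nodes. A standalone sketch of the pattern:

    - hosts: oo_lb_to_config:!oo_masters_to_config:!oo_nodes_to_config
      gather_facts: no
      tasks:
        - debug:
            msg: "{{ inventory_hostname }} is a dedicated load balancer"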
diff --git a/playbooks/common/openshift-management/add_container_provider.yml b/playbooks/common/openshift-management/add_container_provider.yml
new file mode 100644
index 000000000..facb3a5b9
--- /dev/null
+++ b/playbooks/common/openshift-management/add_container_provider.yml
@@ -0,0 +1,8 @@
+---
+- name: Add Container Provider to Management
+ hosts: oo_first_master
+ tasks:
+ - name: Run the Management Integration Tasks
+ include_role:
+ name: openshift_management
+ tasks_from: add_container_provider
diff --git a/playbooks/common/openshift-management/config.yml b/playbooks/common/openshift-management/config.yml
new file mode 100644
index 000000000..908679e81
--- /dev/null
+++ b/playbooks/common/openshift-management/config.yml
@@ -0,0 +1,35 @@
+---
+- name: Management Install Checkpoint Start
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Management install 'In Progress'
+ set_stats:
+ data:
+ installer_phase_management: "In Progress"
+ aggregate: false
+
+- name: Setup CFME
+ hosts: oo_first_master
+ pre_tasks:
+ - name: Create a temporary place to evaluate the PV templates
+ command: mktemp -d /tmp/openshift-ansible-XXXXXXX
+ register: r_openshift_management_mktemp
+ changed_when: false
+
+ tasks:
+ - name: Run the CFME Setup Role
+ include_role:
+ name: openshift_management
+ vars:
+ template_dir: "{{ hostvars[groups.masters.0].r_openshift_management_mktemp.stdout }}"
+
+- name: Management Install Checkpoint End
+ hosts: oo_all_hosts
+ gather_facts: false
+ tasks:
+ - name: Set Management install 'Complete'
+ set_stats:
+ data:
+ installer_phase_management: "Complete"
+ aggregate: false
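One subtlety in the play above: the mktemp result is registered while running
against oo_first_master but read back via hostvars[groups.masters.0], so both
references must resolve to the same machine. A minimal sketch of this
cross-play hostvars lookup (the masters group name is assumed):

    - hosts: masters[0]
      tasks:
        - command: mktemp -d /tmp/openshift-ansible-XXXXXXX
          register: r_mktemp
          changed_when: false

    - hosts: masters[0]
      tasks:
        - debug:
            msg: "PV templates render into {{ hostvars[groups.masters.0].r_mktemp.stdout }}"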
diff --git a/playbooks/common/openshift-cfme/filter_plugins b/playbooks/common/openshift-management/filter_plugins
index 99a95e4ca..99a95e4ca 120000
--- a/playbooks/common/openshift-cfme/filter_plugins
+++ b/playbooks/common/openshift-management/filter_plugins
diff --git a/playbooks/common/openshift-cfme/library b/playbooks/common/openshift-management/library
index ba40d2f56..ba40d2f56 120000
--- a/playbooks/common/openshift-cfme/library
+++ b/playbooks/common/openshift-management/library
diff --git a/playbooks/common/openshift-management/roles b/playbooks/common/openshift-management/roles
new file mode 120000
index 000000000..20c4c58cf
--- /dev/null
+++ b/playbooks/common/openshift-management/roles
@@ -0,0 +1 @@
+../../../roles
\ No newline at end of file
diff --git a/playbooks/common/openshift-cfme/uninstall.yml b/playbooks/common/openshift-management/uninstall.yml
index 78b8e7668..9f35cc276 100644
--- a/playbooks/common/openshift-cfme/uninstall.yml
+++ b/playbooks/common/openshift-management/uninstall.yml
@@ -1,8 +1,8 @@
---
- name: Uninstall CFME
- hosts: masters
+ hosts: masters[0]
tasks:
- name: Run the CFME Uninstall Role Tasks
include_role:
- name: openshift_cfme
+ name: openshift_management
tasks_from: uninstall
diff --git a/playbooks/common/openshift-master/additional_config.yml b/playbooks/common/openshift-master/additional_config.yml
index ee76e2ed7..350557f19 100644
--- a/playbooks/common/openshift-master/additional_config.yml
+++ b/playbooks/common/openshift-master/additional_config.yml
@@ -1,7 +1,6 @@
---
- name: Master Additional Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Master Additional install 'In Progress'
@@ -26,10 +25,10 @@
- role: openshift_hosted_templates
registry_url: "{{ openshift.master.registry_url }}"
- role: openshift_manageiq
- when: openshift_use_manageiq | default(false) | bool
+ when: openshift_use_manageiq | default(true) | bool
- role: cockpit
when:
- - openshift.common.is_atomic
+ - not openshift.common.is_atomic | bool
- deployment_type == 'openshift-enterprise'
- osm_use_cockpit is undefined or osm_use_cockpit | bool
- openshift.common.deployment_subtype != 'registry'
@@ -37,8 +36,7 @@
when: openshift_use_flannel | default(false) | bool
- name: Master Additional Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Master Additional install 'Complete'
diff --git a/playbooks/common/openshift-master/ca.yml b/playbooks/common/openshift-master/ca.yml
deleted file mode 100644
index 5bb796fa3..000000000
--- a/playbooks/common/openshift-master/ca.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Create OpenShift CA
- hosts: oo_masters_to_config
- roles:
- - role: openshift_master_facts
- - role: openshift_named_certificates
- - role: openshift_ca
- openshift_ca_host: "{{ groups.oo_first_master.0 }}"
diff --git a/playbooks/common/openshift-master/config.yml b/playbooks/common/openshift-master/config.yml
index 766e0e501..b359919ba 100644
--- a/playbooks/common/openshift-master/config.yml
+++ b/playbooks/common/openshift-master/config.yml
@@ -1,7 +1,6 @@
---
- name: Master Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Master install 'In Progress'
@@ -10,6 +9,8 @@
installer_phase_master: "In Progress"
aggregate: false
+- include: certificates.yml
+
- name: Disable excluders
hosts: oo_masters_to_config
gather_facts: no
@@ -196,6 +197,7 @@
openshift_master_default_registry_value: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value }}"
openshift_master_default_registry_value_api: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_api }}"
openshift_master_default_registry_value_controllers: "{{ hostvars[groups.oo_first_master.0].l_default_registry_value_controllers }}"
+ - role: tuned
- role: nuage_ca
when: openshift_use_nuage | default(false) | bool
- role: nuage_common
@@ -204,6 +206,12 @@
when: openshift_use_nuage | default(false) | bool
- role: calico_master
when: openshift_use_calico | default(false) | bool
+ tasks:
+ - include_role:
+ name: kuryr
+ tasks_from: master
+ when: openshift_use_kuryr | default(false) | bool
+
post_tasks:
- name: Create group for deployment type
group_by: key=oo_masters_deployment_type_{{ openshift.common.deployment_type }}
@@ -224,8 +232,7 @@
r_openshift_excluder_service_type: "{{ openshift.common.service_type }}"
- name: Master Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Master install 'Complete'
diff --git a/playbooks/common/openshift-master/scaleup.yml b/playbooks/common/openshift-master/scaleup.yml
index d007fac85..f4dc9df8a 100644
--- a/playbooks/common/openshift-master/scaleup.yml
+++ b/playbooks/common/openshift-master/scaleup.yml
@@ -47,8 +47,6 @@
- include: ../openshift-etcd/certificates.yml
-- include: ../openshift-master/certificates.yml
-
- include: ../openshift-master/config.yml
- include: ../openshift-loadbalancer/config.yml
diff --git a/playbooks/common/openshift-nfs/config.yml b/playbooks/common/openshift-nfs/config.yml
index 66303d6f7..ce672daf5 100644
--- a/playbooks/common/openshift-nfs/config.yml
+++ b/playbooks/common/openshift-nfs/config.yml
@@ -1,7 +1,6 @@
---
- name: NFS Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set NFS install 'In Progress'
@@ -17,8 +16,7 @@
- role: openshift_storage_nfs
- name: NFS Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set NFS install 'Complete'
diff --git a/playbooks/common/openshift-node/additional_config.yml b/playbooks/common/openshift-node/additional_config.yml
index fe51ef833..ac757397b 100644
--- a/playbooks/common/openshift-node/additional_config.yml
+++ b/playbooks/common/openshift-node/additional_config.yml
@@ -19,10 +19,14 @@
- group_by:
key: oo_nodes_use_{{ (openshift_use_contiv | default(False)) | ternary('contiv','nothing') }}
changed_when: False
+ # Create group for kuryr nodes
+ - group_by:
+ key: oo_nodes_use_{{ (openshift_use_kuryr | default(False)) | ternary('kuryr','nothing') }}
+ changed_when: False
- include: etcd_client_config.yml
vars:
- openshift_node_scale_up_group: "oo_nodes_use_flannel:oo_nodes_use_calico:oo_nodes_use_contiv"
+ openshift_node_scale_up_group: "oo_nodes_use_flannel:oo_nodes_use_calico:oo_nodes_use_contiv:oo_nodes_use_kuryr"
- name: Additional node config
hosts: oo_nodes_use_flannel
@@ -50,3 +54,11 @@
- role: contiv
contiv_role: netplugin
when: openshift_use_contiv | default(false) | bool
+
+- name: Configure Kuryr node
+ hosts: oo_nodes_use_kuryr
+ tasks:
+ - include_role:
+ name: kuryr
+ tasks_from: node
+ when: openshift_use_kuryr | default(false) | bool
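The group_by/ternary pairing above is the same trick already used for flannel,
calico, and contiv: a boolean inventory switch becomes a targetable group, so
every node lands in either oo_nodes_use_kuryr or oo_nodes_use_nothing. A
self-contained sketch:

    - hosts: all
      gather_facts: no
      tasks:
        - group_by:
            key: oo_nodes_use_{{ (openshift_use_kuryr | default(False)) | ternary('kuryr', 'nothing') }}
          changed_when: False

    - hosts: oo_nodes_use_kuryr
      gather_facts: no
      tasks:
        - debug:
            msg: "kuryr-enabled node: {{ inventory_hostname }}"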
diff --git a/playbooks/common/openshift-node/clean_image.yml b/playbooks/common/openshift-node/clean_image.yml
new file mode 100644
index 000000000..38753d0af
--- /dev/null
+++ b/playbooks/common/openshift-node/clean_image.yml
@@ -0,0 +1,10 @@
+---
+- name: Configure nodes
+ hosts: oo_nodes_to_config:!oo_containerized_master_nodes
+ tasks:
+ - name: Remove any ansible facts created during AMI creation
+ file:
+ path: "/etc/ansible/facts.d/{{ item }}"
+ state: absent
+ with_items:
+ - openshift.fact
diff --git a/playbooks/common/openshift-node/config.yml b/playbooks/common/openshift-node/config.yml
index 6fd8aa6f1..4f8f98aef 100644
--- a/playbooks/common/openshift-node/config.yml
+++ b/playbooks/common/openshift-node/config.yml
@@ -1,7 +1,6 @@
---
- name: Node Install Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Node install 'In Progress'
@@ -10,6 +9,8 @@
installer_phase_node: "In Progress"
aggregate: false
+- include: certificates.yml
+
- include: setup.yml
- include: containerized_nodes.yml
@@ -23,8 +24,7 @@
- include: enable_excluders.yml
- name: Node Install Checkpoint End
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
tasks:
- name: Set Node install 'Complete'
diff --git a/playbooks/common/openshift-node/configure_nodes.yml b/playbooks/common/openshift-node/configure_nodes.yml
index c96e4921c..17259422d 100644
--- a/playbooks/common/openshift-node/configure_nodes.yml
+++ b/playbooks/common/openshift-node/configure_nodes.yml
@@ -13,4 +13,5 @@
roles:
- role: os_firewall
- role: openshift_node
+ - role: tuned
- role: nickhammond.logrotate
diff --git a/playbooks/common/openshift-node/image_prep.yml b/playbooks/common/openshift-node/image_prep.yml
new file mode 100644
index 000000000..30651a1df
--- /dev/null
+++ b/playbooks/common/openshift-node/image_prep.yml
@@ -0,0 +1,24 @@
+---
+- name: normalize groups
+ include: ../../byo/openshift-cluster/initialize_groups.yml
+
+- name: evaluate the groups
+ include: ../openshift-cluster/evaluate_groups.yml
+
+- name: initialize the facts
+ include: ../openshift-cluster/initialize_facts.yml
+
+- name: initialize the repositories
+ include: ../openshift-cluster/initialize_openshift_repos.yml
+
+- name: run node config setup
+ include: setup.yml
+
+- name: run node config
+ include: configure_nodes.yml
+
+- name: Re-enable excluders
+ include: enable_excluders.yml
+
+- name: Remove any undesired artifacts from build
+ include: clean_image.yml
diff --git a/roles/ansible_service_broker/defaults/main.yml b/roles/ansible_service_broker/defaults/main.yml
index fa982d533..dc05b03b5 100644
--- a/roles/ansible_service_broker/defaults/main.yml
+++ b/roles/ansible_service_broker/defaults/main.yml
@@ -13,7 +13,4 @@ ansible_service_broker_launch_apb_on_bind: false
ansible_service_broker_image_pull_policy: IfNotPresent
ansible_service_broker_sandbox_role: edit
-ansible_service_broker_auto_escalate: true
-ansible_service_broker_registry_tag: latest
-ansible_service_broker_registry_whitelist:
- - '.*-apb$'
+ansible_service_broker_auto_escalate: false
diff --git a/roles/ansible_service_broker/tasks/install.yml b/roles/ansible_service_broker/tasks/install.yml
index 0f4b71124..89a84c4df 100644
--- a/roles/ansible_service_broker/tasks/install.yml
+++ b/roles/ansible_service_broker/tasks/install.yml
@@ -22,19 +22,14 @@
ansible_service_broker_registry_user: "{{ ansible_service_broker_registry_user | default(__ansible_service_broker_registry_user) }}"
ansible_service_broker_registry_password: "{{ ansible_service_broker_registry_password | default(__ansible_service_broker_registry_password) }}"
ansible_service_broker_registry_organization: "{{ ansible_service_broker_registry_organization | default(__ansible_service_broker_registry_organization) }}"
-
- ansible_service_broker_certs_dir: "{{ openshift.common.config_base }}/service-catalog"
+ ansible_service_broker_registry_tag: "{{ ansible_service_broker_registry_tag | default(__ansible_service_broker_registry_tag) }}"
+ ansible_service_broker_registry_whitelist: "{{ ansible_service_broker_registry_whitelist | default(__ansible_service_broker_registry_whitelist) }}"
- name: set ansible-service-broker image facts using set prefix and tag
set_fact:
ansible_service_broker_image: "{{ ansible_service_broker_image_prefix }}ansible-service-broker:{{ ansible_service_broker_image_tag }}"
ansible_service_broker_etcd_image: "{{ ansible_service_broker_etcd_image_prefix }}etcd:{{ ansible_service_broker_etcd_image_tag }}"
-- slurp:
- src: "{{ ansible_service_broker_certs_dir }}/ca.crt"
- register: catalog_ca
-
-
- include: validate_facts.yml
@@ -79,13 +74,12 @@
state: present
name: asb-access
rules:
- - nonResourceURLs: ["/ansible-service-broker", "ansible-service-broker/*"]
+ - nonResourceURLs: ["/ansible-service-broker", "/ansible-service-broker/*"]
verbs: ["get", "post", "put", "patch", "delete"]
- name: Bind admin cluster-role to asb serviceaccount
oc_adm_policy_user:
state: present
- namespace: openshift-ansible-service-broker
resource_kind: cluster-role
resource_name: admin
user: "system:serviceaccount:openshift-ansible-service-broker:asb"
@@ -93,7 +87,6 @@
- name: Bind auth cluster role to asb service account
oc_adm_policy_user:
state: present
- namespace: openshift-ansible-service-broker
resource_kind: cluster-role
resource_name: asb-auth
user: "system:serviceaccount:openshift-ansible-service-broker:asb"
@@ -101,7 +94,6 @@
- name: Bind asb-access role to asb-client service account
oc_adm_policy_user:
state: present
- namespace: openshift-ansible-service-broker
resource_kind: cluster-role
resource_name: asb-access
user: "system:serviceaccount:openshift-ansible-service-broker:asb-client"
@@ -109,6 +101,7 @@
- name: create asb-client token secret
oc_obj:
name: asb-client
+ namespace: openshift-ansible-service-broker
state: present
kind: Secret
content:
@@ -118,10 +111,20 @@
kind: Secret
metadata:
name: asb-client
+ namespace: openshift-ansible-service-broker
annotations:
kubernetes.io/service-account.name: asb-client
type: kubernetes.io/service-account-token
+- oc_secret:
+ state: list
+ namespace: openshift-ansible-service-broker
+ name: asb-client
+ register: asb_client_secret
+
+- set_fact:
+ service_ca_crt: "{{ asb_client_secret.results.results.0.data['service-ca.crt'] }}"
+
# Using oc_obj because oc_service doesn't seem to allow annotations
# TODO: Extend oc_service to allow annotations
- name: create ansible-service-broker service
@@ -137,6 +140,7 @@
kind: Service
metadata:
name: asb
+ namespace: openshift-ansible-service-broker
labels:
app: openshift-ansible-service-broker
service: asb
@@ -231,6 +235,20 @@
value: /etc/ansible-service-broker/config.yaml
resources: {}
terminationMessagePath: /tmp/termination-log
+ readinessProbe:
+ httpGet:
+ port: 1338
+ path: /healthz
+ scheme: HTTPS
+ initialDelaySeconds: 15
+ timeoutSeconds: 1
+ livenessProbe:
+ httpGet:
+ port: 1338
+ path: /healthz
+ scheme: HTTPS
+ initialDelaySeconds: 15
+ timeoutSeconds: 1
- image: "{{ ansible_service_broker_etcd_image }}"
name: etcd
@@ -327,20 +345,20 @@
oc_obj:
name: ansible-service-broker
state: present
- kind: ServiceBroker
+ kind: ClusterServiceBroker
content:
path: /tmp/brokerout
data:
- apiVersion: servicecatalog.k8s.io/v1alpha1
- kind: ServiceBroker
+ apiVersion: servicecatalog.k8s.io/v1beta1
+ kind: ClusterServiceBroker
metadata:
name: ansible-service-broker
spec:
- url: http://asb.openshift-ansible-service-broker.svc:1338/ansible-service-broker
+ url: https://asb.openshift-ansible-service-broker.svc:1338/ansible-service-broker
authInfo:
bearer:
secretRef:
name: asb-client
namespace: openshift-ansible-service-broker
kind: Secret
- caBundle: "{{ catalog_ca.content }}"
+ caBundle: "{{ service_ca_crt }}"
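With the slurp of the catalog CA removed, the broker registration now trusts
the service-signing CA published in the asb-client token secret; the
service-ca.crt value in the secret data is already base64-encoded, which is
what the ClusterServiceBroker caBundle field expects. A hedged sketch for
inspecting that value after the install:

    - hosts: oo_first_master
      tasks:
        - oc_secret:
            state: list
            namespace: openshift-ansible-service-broker
            name: asb-client
          register: asb_client_secret

        - debug:
            msg: "{{ asb_client_secret.results.results.0.data['service-ca.crt'] | b64decode }}"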
diff --git a/roles/ansible_service_broker/tasks/remove.yml b/roles/ansible_service_broker/tasks/remove.yml
index f0a6be226..51b86fb26 100644
--- a/roles/ansible_service_broker/tasks/remove.yml
+++ b/roles/ansible_service_broker/tasks/remove.yml
@@ -85,9 +85,9 @@
- name: remove secret for broker auth
oc_obj:
- name: asb-auth-secret
+ name: asb-client
namespace: openshift-ansible-service-broker
- kind: Broker
+ kind: Secret
state: absent
# TODO: saw a oc_configmap in the library, but didn't understand how to get it to do the following:
@@ -99,11 +99,17 @@
kind: ConfigMap
# TODO: Is this going to work?
+- shell: >
+ oc get apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found"
+ register: get_apiservices
+ changed_when: no
+
- name: remove broker object from the catalog
oc_obj:
name: ansible-service-broker
state: absent
- kind: ServiceBroker
+ kind: ClusterServiceBroker
+ when: not "'not found' in get_apiservices.stdout"
- name: remove openshift-ansible-service-broker project
oc_project:
diff --git a/roles/ansible_service_broker/vars/default_images.yml b/roles/ansible_service_broker/vars/default_images.yml
index 3e9639adf..8438e993f 100644
--- a/roles/ansible_service_broker/vars/default_images.yml
+++ b/roles/ansible_service_broker/vars/default_images.yml
@@ -13,3 +13,5 @@ __ansible_service_broker_registry_url: null
__ansible_service_broker_registry_user: null
__ansible_service_broker_registry_password: null
__ansible_service_broker_registry_organization: null
+__ansible_service_broker_registry_tag: latest
+__ansible_service_broker_registry_whitelist: []
diff --git a/roles/ansible_service_broker/vars/openshift-enterprise.yml b/roles/ansible_service_broker/vars/openshift-enterprise.yml
index 9c576cb76..fc58b4fd8 100644
--- a/roles/ansible_service_broker/vars/openshift-enterprise.yml
+++ b/roles/ansible_service_broker/vars/openshift-enterprise.yml
@@ -1,7 +1,7 @@
---
__ansible_service_broker_image_prefix: registry.access.redhat.com/openshift3/ose-
-__ansible_service_broker_image_tag: v3.6
+__ansible_service_broker_image_tag: v3.7
__ansible_service_broker_etcd_image_prefix: rhel7/
__ansible_service_broker_etcd_image_tag: latest
@@ -14,3 +14,6 @@ __ansible_service_broker_registry_url: "https://registry.access.redhat.com"
__ansible_service_broker_registry_user: null
__ansible_service_broker_registry_password: null
__ansible_service_broker_registry_organization: null
+__ansible_service_broker_registry_tag: v3.7
+__ansible_service_broker_registry_whitelist:
+ - '.*-apb$'
diff --git a/roles/docker/defaults/main.yml b/roles/docker/defaults/main.yml
index e36dfa7b9..f6f2bd77e 100644
--- a/roles/docker/defaults/main.yml
+++ b/roles/docker/defaults/main.yml
@@ -1,5 +1,6 @@
---
docker_cli_auth_config_path: '/root/.docker'
+openshift_docker_signature_verification: False
# oreg_url is defined by user input.
oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
@@ -19,4 +20,16 @@ l2_docker_additional_registries: "{% if openshift_docker_additional_registries i
l2_docker_blocked_registries: "{% if openshift_docker_blocked_registries is string %}{% if openshift_docker_blocked_registries == '' %}[]{% elif ',' in openshift_docker_blocked_registries %}{{ openshift_docker_blocked_registries.split(',') | list }}{% else %}{{ [ openshift_docker_blocked_registries ] }}{% endif %}{% else %}{{ openshift_docker_blocked_registries }}{% endif %}"
l2_docker_insecure_registries: "{% if openshift_docker_insecure_registries is string %}{% if openshift_docker_insecure_registries == '' %}[]{% elif ',' in openshift_docker_insecure_registries %}{{ openshift_docker_insecure_registries.split(',') | list }}{% else %}{{ [ openshift_docker_insecure_registries ] }}{% endif %}{% else %}{{ openshift_docker_insecure_registries }}{% endif %}"
+openshift_docker_use_etc_containers: False
containers_registries_conf_path: /etc/containers/registries.conf
+
+r_crio_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
+r_crio_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
+
+r_crio_os_firewall_deny: []
+r_crio_os_firewall_allow:
+- service: crio
+ port: 10010/tcp
+
+
+openshift_docker_is_node_or_master: "{{ True if inventory_hostname in (groups['oo_masters_to_config']|default([])) or inventory_hostname in (groups['oo_nodes_to_config']|default([])) else False | bool }}"
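The l2_docker_* expressions above normalize whatever form the inventory
provides into a proper list. A hedged illustration of the accepted input
forms (registry names are placeholders):

    # Comma-separated string -> ['reg1.example.com', 'reg2.example.com']
    openshift_docker_additional_registries: "reg1.example.com,reg2.example.com"

    # Single string -> ['reg1.example.com']
    #openshift_docker_additional_registries: "reg1.example.com"

    # Native YAML list -> passed through unchanged
    #openshift_docker_additional_registries:
    #- reg1.example.com
    #- reg2.example.com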
diff --git a/roles/docker/handlers/main.yml b/roles/docker/handlers/main.yml
index 591367467..866ed0452 100644
--- a/roles/docker/handlers/main.yml
+++ b/roles/docker/handlers/main.yml
@@ -4,6 +4,7 @@
systemd:
name: "{{ openshift.docker.service_name }}"
state: restarted
+ daemon_reload: yes
register: r_docker_restart_docker_result
until: not r_docker_restart_docker_result | failed
retries: 3
diff --git a/roles/docker/meta/main.yml b/roles/docker/meta/main.yml
index b773a417c..62b8a2eb5 100644
--- a/roles/docker/meta/main.yml
+++ b/roles/docker/meta/main.yml
@@ -11,3 +11,4 @@ galaxy_info:
- 7
dependencies:
- role: lib_openshift
+- role: lib_os_firewall
diff --git a/roles/docker/tasks/crio_firewall.yml b/roles/docker/tasks/crio_firewall.yml
new file mode 100644
index 000000000..fbd1ff515
--- /dev/null
+++ b/roles/docker/tasks/crio_firewall.yml
@@ -0,0 +1,40 @@
+---
+- when: r_crio_firewall_enabled | bool and not r_crio_use_firewalld | bool
+ block:
+ - name: Add iptables allow rules
+ os_firewall_manage_iptables:
+ name: "{{ item.service }}"
+ action: add
+ protocol: "{{ item.port.split('/')[1] }}"
+ port: "{{ item.port.split('/')[0] }}"
+ when: item.cond | default(True)
+ with_items: "{{ r_crio_os_firewall_allow }}"
+
+ - name: Remove iptables rules
+ os_firewall_manage_iptables:
+ name: "{{ item.service }}"
+ action: remove
+ protocol: "{{ item.port.split('/')[1] }}"
+ port: "{{ item.port.split('/')[0] }}"
+ when: item.cond | default(True)
+ with_items: "{{ r_crio_os_firewall_deny }}"
+
+- when: r_crio_firewall_enabled | bool and r_crio_use_firewalld | bool
+ block:
+ - name: Add firewalld allow rules
+ firewalld:
+ port: "{{ item.port }}"
+ permanent: true
+ immediate: true
+ state: enabled
+ when: item.cond | default(True)
+ with_items: "{{ r_crio_os_firewall_allow }}"
+
+ - name: Remove firewalld allow rules
+ firewalld:
+ port: "{{ item.port }}"
+ permanent: true
+ immediate: true
+ state: disabled
+ when: item.cond | default(True)
+ with_items: "{{ r_crio_os_firewall_deny }}"
diff --git a/roles/docker/tasks/main.yml b/roles/docker/tasks/main.yml
index dae17c3ce..1539af53f 100644
--- a/roles/docker/tasks/main.yml
+++ b/roles/docker/tasks/main.yml
@@ -10,13 +10,14 @@
l_use_crio: "{{ openshift_use_crio | default(False) }}"
l_use_crio_only: "{{ openshift_use_crio_only | default(False) }}"
-- when:
- - openshift_deployment_type == 'openshift-enterprise'
- assert:
- that:
- - "openshift_image_tag is defined"
- msg: >
- openshift_image_tag is a required inventory variable when installing openshift-enterprise
+- name: Add enterprise registry, if necessary
+ set_fact:
+ l2_docker_additional_registries: "{{ l2_docker_additional_registries + [openshift_docker_ent_reg] }}"
+ when:
+ - openshift.common.deployment_type == 'openshift-enterprise'
+ - openshift_docker_ent_reg != ''
+ - openshift_docker_ent_reg not in l2_docker_additional_registries
+ - not l_use_crio_only
- name: Use Package Docker if Requested
include: package_docker.yml
@@ -34,4 +35,4 @@
include: systemcontainer_crio.yml
when:
- l_use_crio
- - inventory_hostname in groups['oo_masters_to_config'] or inventory_hostname in groups['oo_nodes_to_config']
+ - openshift_docker_is_node_or_master | bool
diff --git a/roles/docker/tasks/package_docker.yml b/roles/docker/tasks/package_docker.yml
index a35335937..c1aedf879 100644
--- a/roles/docker/tasks/package_docker.yml
+++ b/roles/docker/tasks/package_docker.yml
@@ -48,15 +48,9 @@
template:
dest: "{{ docker_systemd_dir }}/custom.conf"
src: custom.conf.j2
- when: not os_firewall_use_firewalld | default(False) | bool
-
-- name: Add enterprise registry, if necessary
- set_fact:
- l2_docker_additional_registries: "{{ l2_docker_additional_registries + [openshift_docker_ent_reg] }}"
- when:
- - openshift.common.deployment_type == 'openshift-enterprise'
- - openshift_docker_ent_reg != ''
- - openshift_docker_ent_reg not in l2_docker_additional_registries
+ notify:
+ - restart docker
+ when: not (os_firewall_use_firewalld | default(False)) | bool
- stat: path=/etc/sysconfig/docker
register: docker_check
@@ -87,6 +81,7 @@
template:
dest: "{{ containers_registries_conf_path }}"
src: registries.conf
+ when: openshift_docker_use_etc_containers | bool
notify:
- restart docker
@@ -113,11 +108,12 @@
dest: /etc/sysconfig/docker
regexp: '^OPTIONS=.*$'
line: "OPTIONS='\
- {% if ansible_selinux.status | default(None) == 'enabled' and docker_selinux_enabled | default(true) | bool %} --selinux-enabled {% endif %}\
- {% if docker_log_driver is defined %} --log-driver {{ docker_log_driver }}{% endif %}\
- {% if docker_log_options is defined %} {{ docker_log_options | oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %}\
- {% if docker_options is defined %} {{ docker_options }}{% endif %}\
- {% if docker_disable_push_dockerhub is defined %} --confirm-def-push={{ docker_disable_push_dockerhub | bool }}{% endif %}'"
+ {% if ansible_selinux.status | default(None) == 'enabled' and docker_selinux_enabled | default(true) | bool %} --selinux-enabled {% endif %} \
+ {% if docker_log_driver is defined %} --log-driver {{ docker_log_driver }}{% endif %} \
+ {% if docker_log_options is defined %} {{ docker_log_options | oo_split() | oo_prepend_strings_in_list('--log-opt ') | join(' ')}}{% endif %} \
+ {% if docker_options is defined %} {{ docker_options }}{% endif %} \
+ {% if docker_disable_push_dockerhub is defined %} --confirm-def-push={{ docker_disable_push_dockerhub | bool }}{% endif %} \
+ --signature-verification={{ openshift_docker_signature_verification | bool }}'"
when: docker_check.stat.isreg is defined and docker_check.stat.isreg
notify:
- restart docker
@@ -137,17 +133,12 @@
notify:
- restart docker
-- name: Check for credentials file for registry auth
- stat:
- path: "{{ docker_cli_auth_config_path }}/config.json"
- when: oreg_auth_user is defined
- register: docker_cli_auth_credentials_stat
-
-- name: Create credentials for docker cli registry auth
- command: "docker --config={{ docker_cli_auth_config_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
- when:
- - oreg_auth_user is defined
- - (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
+# The following task is needed as the systemd module may report a change in
+# state even though docker is already running.
+- name: Detect if docker is already started
+ command: "systemctl show docker -p ActiveState"
+ changed_when: False
+ register: r_docker_already_running_result
- name: Start the Docker service
systemd:
@@ -161,6 +152,8 @@
delay: 30
- set_fact:
- docker_service_status_changed: "{{ r_docker_package_docker_start_result | changed }}"
+ docker_service_status_changed: "{{ (r_docker_package_docker_start_result | changed) and (r_docker_already_running_result.stdout != 'ActiveState=active' ) }}"
+
+- include: registry_auth.yml
- meta: flush_handlers
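The ActiveState probe above exists because the systemd module can report a
change even when docker was already running, so docker_service_status_changed
is only true on a genuine state transition. A condensed sketch of the guard:

    - command: systemctl show docker -p ActiveState
      changed_when: False
      register: r_docker_already_running_result

    - systemd:
        name: docker
        state: started
      register: r_start

    - set_fact:
        docker_service_status_changed: "{{ (r_start | changed) and (r_docker_already_running_result.stdout != 'ActiveState=active') }}"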
diff --git a/roles/docker/tasks/registry_auth.yml b/roles/docker/tasks/registry_auth.yml
new file mode 100644
index 000000000..d05b7f2b8
--- /dev/null
+++ b/roles/docker/tasks/registry_auth.yml
@@ -0,0 +1,16 @@
+---
+- name: Check for credentials file for registry auth
+ stat:
+ path: "{{ docker_cli_auth_config_path }}/config.json"
+ when: oreg_auth_user is defined
+ register: docker_cli_auth_credentials_stat
+
+- name: Create credentials for docker cli registry auth
+ command: "docker --config={{ docker_cli_auth_config_path }} login -u {{ oreg_auth_user }} -p {{ oreg_auth_password }} {{ oreg_host }}"
+ register: openshift_docker_credentials_create_res
+ retries: 3
+ delay: 5
+ until: openshift_docker_credentials_create_res.rc == 0
+ when:
+ - oreg_auth_user is defined
+ - (not docker_cli_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
diff --git a/roles/docker/tasks/systemcontainer_crio.yml b/roles/docker/tasks/systemcontainer_crio.yml
index 386369d26..67ede0d21 100644
--- a/roles/docker/tasks/systemcontainer_crio.yml
+++ b/roles/docker/tasks/systemcontainer_crio.yml
@@ -3,16 +3,32 @@
# TODO: Much of this file is shared with container engine tasks
- set_fact:
l_insecure_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l2_docker_insecure_registries)) }}"
- when: l2_docker_insecure_registries
+ when: l2_docker_insecure_registries | bool
- set_fact:
l_crio_registries: "{{ l2_docker_additional_registries + ['docker.io'] }}"
- when: l2_docker_additional_registries
+ when: l2_docker_additional_registries | bool
- set_fact:
l_crio_registries: "{{ ['docker.io'] }}"
- when: not l2_docker_additional_registries
+ when: not (l2_docker_additional_registries | bool)
- set_fact:
l_additional_crio_registries: "{{ '\"{}\"'.format('\", \"'.join(l_crio_registries)) }}"
- when: l2_docker_additional_registries
+ when: l2_docker_additional_registries | bool
+
+- set_fact:
+ l_openshift_image_tag: "{{ openshift_image_tag | string }}"
+ when: openshift_image_tag is defined
+
+- set_fact:
+ l_openshift_image_tag: "latest"
+ when:
+ - openshift_image_tag is not defined
+ - openshift_release == "latest"
+
+- set_fact:
+ l_openshift_image_tag: "{{ openshift_release | string }}"
+ when:
+ - openshift_image_tag is not defined
+ - openshift_release != "latest"
- name: Ensure container-selinux is installed
package:
@@ -20,6 +36,12 @@
state: present
when: not openshift.common.is_atomic | bool
+- name: Check we are not using node as a Docker container with CRI-O
+ fail: msg='Cannot use CRI-O with node configured as a Docker container'
+ when:
+ - openshift.common.is_containerized | bool
+ - not openshift.common.is_node_system_container | bool
+
# Used to pull and install the system container
- name: Ensure atomic is installed
package:
@@ -40,7 +62,7 @@
shell: lsmod | grep overlay
register: l_has_overlay_in_kernel
ignore_errors: yes
-
+ failed_when: false
- when: l_has_overlay_in_kernel.rc != 0
block:
@@ -60,36 +82,10 @@
enabled: yes
state: restarted
-
-- block:
-
- - name: Add http_proxy to /etc/atomic.conf
- lineinfile:
- dest: /etc/atomic.conf
- regexp: "^#?http_proxy[:=]{1}"
- line: "http_proxy: {{ openshift.common.http_proxy | default('') }}"
- when:
- - openshift.common.http_proxy is defined
- - openshift.common.http_proxy != ''
-
- - name: Add https_proxy to /etc/atomic.conf
- lineinfile:
- dest: /etc/atomic.conf
- regexp: "^#?https_proxy[:=]{1}"
- line: "https_proxy: {{ openshift.common.https_proxy | default('') }}"
- when:
- - openshift.common.https_proxy is defined
- - openshift.common.https_proxy != ''
-
- - name: Add no_proxy to /etc/atomic.conf
- lineinfile:
- dest: /etc/atomic.conf
- regexp: "^#?no_proxy[:=]{1}"
- line: "no_proxy: {{ openshift.common.no_proxy | default('') }}"
- when:
- - openshift.common.no_proxy is defined
- - openshift.common.no_proxy != ''
-
+- name: Ensure proxies are in the atomic.conf
+ include_role:
+ name: openshift_atomic
+ tasks_from: proxy
- block:
@@ -106,10 +102,9 @@
- name: Set CRI-O image tag
set_fact:
- l_crio_image_tag: "{{ openshift_image_tag }}"
+ l_crio_image_tag: "{{ l_openshift_image_tag }}"
when:
- openshift_deployment_type == 'openshift-enterprise'
- - openshift_image_tag is defined
- name: Use RHEL based image when distribution is Red Hat
set_fact:
@@ -147,7 +142,7 @@
image: "{{ l_crio_image }}"
state: latest
-- name: Remove CRI-o default configuration files
+- name: Remove CRI-O default configuration files
file:
path: "{{ item }}"
state: absent
@@ -166,11 +161,19 @@
path: /etc/cni/net.d/
state: directory
+- name: setup firewall for CRI-O
+ include: crio_firewall.yml
+ static: yes
+
- name: Configure the CNI network
template:
dest: /etc/cni/net.d/openshift-sdn.conf
src: 80-openshift-sdn.conf.j2
+- name: Fix SELinux Permissions on /var/lib/containers
+ command: "restorecon -R /var/lib/containers/"
+ changed_when: false
+
- name: Start the CRI-O service
systemd:
name: "cri-o"
diff --git a/roles/docker/tasks/systemcontainer_docker.yml b/roles/docker/tasks/systemcontainer_docker.yml
index 5b1605b58..aa3b35ddd 100644
--- a/roles/docker/tasks/systemcontainer_docker.yml
+++ b/roles/docker/tasks/systemcontainer_docker.yml
@@ -1,5 +1,21 @@
---
+- set_fact:
+ l_openshift_image_tag: "{{ openshift_image_tag | string }}"
+ when: openshift_image_tag is defined
+
+- set_fact:
+ l_openshift_image_tag: "latest"
+ when:
+ - openshift_image_tag is not defined
+ - openshift_release == "latest"
+
+- set_fact:
+ l_openshift_image_tag: "{{ openshift_release | string }}"
+ when:
+ - openshift_image_tag is not defined
+ - openshift_release != "latest"
+
# If docker_options are provided we should fail. We should not install docker and ignore
# the user's configuration. NOTE: docker_options == inventory:openshift_docker_options
- name: Fail quickly if openshift_docker_options are set
@@ -52,38 +68,10 @@
retries: 3
delay: 30
-
-# Set http_proxy, https_proxy, and no_proxy in /etc/atomic.conf
-# regexp: the line starts with or without #, followed by the string
-# http_proxy, then either : or =
-- block:
-
- - name: Add http_proxy to /etc/atomic.conf
- lineinfile:
- dest: /etc/atomic.conf
- regexp: "^#?http_proxy[:=]{1}"
- line: "http_proxy: {{ openshift.common.http_proxy | default('') }}"
- when:
- - openshift.common.http_proxy is defined
- - openshift.common.http_proxy != ''
-
- - name: Add https_proxy to /etc/atomic.conf
- lineinfile:
- dest: /etc/atomic.conf
- regexp: "^#?https_proxy[:=]{1}"
- line: "https_proxy: {{ openshift.common.https_proxy | default('') }}"
- when:
- - openshift.common.https_proxy is defined
- - openshift.common.https_proxy != ''
-
- - name: Add no_proxy to /etc/atomic.conf
- lineinfile:
- dest: /etc/atomic.conf
- regexp: "^#?no_proxy[:=]{1}"
- line: "no_proxy: {{ openshift.common.no_proxy | default('') }}"
- when:
- - openshift.common.no_proxy is defined
- - openshift.common.no_proxy != ''
+- name: Ensure proxies are in the atomic.conf
+ include_role:
+ name: openshift_atomic
+ tasks_from: proxy
- block:
@@ -94,10 +82,9 @@
- name: Set container engine image tag
set_fact:
- l_docker_image_tag: "{{ openshift_image_tag }}"
+ l_docker_image_tag: "{{ l_openshift_image_tag }}"
when:
- openshift_deployment_type == 'openshift-enterprise'
- - openshift_image_tag is defined
- name: Use Red Hat Registry for image when distribution is Red Hat
set_fact:
@@ -186,4 +173,6 @@
- set_fact:
docker_service_status_changed: "{{ r_docker_systemcontainer_docker_start_result | changed }}"
+- include: registry_auth.yml
+
- meta: flush_handlers
diff --git a/roles/docker/templates/crio.conf.j2 b/roles/docker/templates/crio.conf.j2
index b4ee84fd0..93014a80d 100644
--- a/roles/docker/templates/crio.conf.j2
+++ b/roles/docker/templates/crio.conf.j2
@@ -13,12 +13,12 @@ runroot = "/var/run/containers/storage"
# storage_driver select which storage driver is used to manage storage
# of images and containers.
-storage_driver = "overlay2"
+storage_driver = "overlay"
# storage_option is used to pass an option to the storage driver.
storage_option = [
{% if ansible_distribution in ['RedHat', 'CentOS'] %}
- "overlay2.override_kernel_check=1"
+ "overlay.override_kernel_check=1"
{% endif %}
]
@@ -35,6 +35,10 @@ stream_address = ""
# stream_port is the port on which the stream server will listen
stream_port = "10010"
+# file_locking is whether file-based locking will be used instead of
+# in-memory locking
+file_locking = true
+
# The "crio.runtime" table contains settings pertaining to the OCI
# runtime used and options for how to set up and manage the OCI runtime.
[crio.runtime]
@@ -67,6 +71,9 @@ runtime_untrusted_workload = ""
# container runtime for all containers.
default_workload_trust = "trusted"
+# no_pivot instructs the runtime to not use pivot_root, but instead use MS_MOVE
+no_pivot = false
+
# conmon is the path to conmon binary, used for managing the runtime.
conmon = "/usr/libexec/crio/conmon"
@@ -93,6 +100,16 @@ apparmor_profile = "crio-default"
# for the runtime.
cgroup_manager = "systemd"
+# hooks_dir_path is the oci hooks directory for automatically executed hooks
+hooks_dir_path = "/usr/share/containers/oci/hooks.d"
+
+# pids_limit is the number of processes allowed in a container
+pids_limit = 1024
+
+# log_size_max is the max limit for the container log size in bytes.
+# Negative values indicate that no limit is imposed.
+log_size_max = 52428800
+
# The "crio.image" table contains settings pertaining to the
# management of OCI images.
[crio.image]
@@ -115,6 +132,10 @@ pause_command = "/pause"
# unspecified so that the default system-wide policy will be used.
signature_policy = ""
+# image_volumes controls how image volumes are handled.
+# The valid values are mkdir and ignore.
+image_volumes = "mkdir"
+
# insecure_registries is used to skip TLS verification when pulling images.
insecure_registries = [
{{ l_insecure_crio_registries|default("") }}
@@ -125,6 +146,7 @@ insecure_registries = [
registries = [
{{ l_additional_crio_registries|default("") }}
]
+
# The "crio.network" table contains settings pertaining to the
# management of CNI plugins.
[crio.network]
diff --git a/roles/docker/templates/custom.conf.j2 b/roles/docker/templates/custom.conf.j2
index 9b47cb6ab..713412473 100644
--- a/roles/docker/templates/custom.conf.j2
+++ b/roles/docker/templates/custom.conf.j2
@@ -3,3 +3,9 @@
[Unit]
Wants=iptables.service
After=iptables.service
+
+# The following line is a work-around to ensure docker is restarted whenever
+# iptables is restarted. This ensures the proper iptables rules will be in
+# place for docker.
+# Note: This will also cause docker to be stopped if iptables is stopped.
+PartOf=iptables.service
diff --git a/roles/etcd/defaults/main.yaml b/roles/etcd/defaults/main.yaml
index 18164050a..4b734d4ed 100644
--- a/roles/etcd/defaults/main.yaml
+++ b/roles/etcd/defaults/main.yaml
@@ -13,8 +13,6 @@ r_etcd_common_etcdctl_command: "{{ 'etcdctl' if r_etcd_common_etcd_runtime == 'h
# etcd server vars
etcd_conf_dir: '/etc/etcd'
-r_etcd_common_system_container_host_dir: /var/lib/etcd/etcd.etcd
-etcd_system_container_conf_dir: /var/lib/etcd/etc
etcd_conf_file: "{{ etcd_conf_dir }}/etcd.conf"
etcd_ca_file: "{{ etcd_conf_dir }}/ca.crt"
etcd_cert_file: "{{ etcd_conf_dir }}/server.crt"
@@ -54,7 +52,7 @@ etcd_is_containerized: False
etcd_is_thirdparty: False
# etcd dir vars
-etcd_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' if r_etcd_common_etcd_runtime != 'runc' else '/var/lib/etcd/etcd.etcd/' }}"
+etcd_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' }}"
# etcd ports and protocols
etcd_client_port: 2379
@@ -70,7 +68,8 @@ etcd_listen_peer_urls: "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_
etcd_advertise_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}"
etcd_listen_client_urls: "{{ etcd_url_scheme }}://{{ etcd_ip }}:{{ etcd_client_port }}"
-etcd_peer: 127.0.0.1
+# required role variable
+#etcd_peer: 127.0.0.1
etcdctlv2: "etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} -C https://{{ etcd_peer }}:{{ etcd_client_port }}"
etcd_service: "{{ 'etcd_container' if r_etcd_common_etcd_runtime == 'docker' else 'etcd' }}"
@@ -78,7 +77,7 @@ etcd_service: "{{ 'etcd_container' if r_etcd_common_etcd_runtime == 'docker' els
etcd_service_file: "/etc/systemd/system/{{ etcd_service }}.service"
r_etcd_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
-r_etcd_use_firewalld: "{{ os_firewall_use_firewalld | default(Falsel) }}"
+r_etcd_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
etcd_systemd_dir: "/etc/systemd/system/{{ etcd_service }}.service.d"
r_etcd_os_firewall_deny: []
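With the default removed, etcd_peer becomes a required caller-supplied
variable (the etcdctlv2 alias above depends on it). A hedged example of
supplying it per host (the group name and address source are assumptions):

    - hosts: my_etcd_hosts
      roles:
        - role: etcd
          etcd_peer: "{{ ansible_default_ipv4.address }}"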
diff --git a/roles/etcd/tasks/auxiliary/clean_data.yml b/roles/etcd/tasks/auxiliary/clean_data.yml
index 95a0e7c0a..1ed2db5bc 100644
--- a/roles/etcd/tasks/auxiliary/clean_data.yml
+++ b/roles/etcd/tasks/auxiliary/clean_data.yml
@@ -1,5 +1,5 @@
---
- name: Remove member data
file:
- path: /var/lib/etcd/member
+ path: "{{ etcd_data_dir }}/member"
state: absent
diff --git a/roles/etcd/tasks/auxiliary/disable_etcd.yml b/roles/etcd/tasks/auxiliary/disable_etcd.yml
new file mode 100644
index 000000000..7c6d0409d
--- /dev/null
+++ b/roles/etcd/tasks/auxiliary/disable_etcd.yml
@@ -0,0 +1,5 @@
+---
+- name: Disable etcd members
+ service:
+ name: "{{ etcd_service }}"
+ state: stopped
diff --git a/roles/etcd/tasks/auxiliary/force_new_cluster.yml b/roles/etcd/tasks/auxiliary/force_new_cluster.yml
new file mode 100644
index 000000000..ae8a36130
--- /dev/null
+++ b/roles/etcd/tasks/auxiliary/force_new_cluster.yml
@@ -0,0 +1,31 @@
+---
+- name: Set ETCD_FORCE_NEW_CLUSTER=true on first etcd host
+ lineinfile:
+ line: "ETCD_FORCE_NEW_CLUSTER=true"
+ dest: /etc/etcd/etcd.conf
+ backup: true
+
+- name: Start etcd
+ systemd:
+ name: "{{ etcd_service }}"
+ state: started
+
+- name: Wait for cluster to become healthy after bringing up first member
+ command: >
+ etcdctl --cert-file {{ etcd_peer_cert_file }} --key-file {{ etcd_peer_key_file }} --ca-file {{ etcd_peer_ca_file }} --endpoint https://{{ etcd_peer }}:{{ etcd_client_port }} cluster-health
+ register: l_etcd_migrate_health
+ until: l_etcd_migrate_health.rc == 0
+ retries: 3
+ delay: 30
+
+- name: Unset ETCD_FORCE_NEW_CLUSTER=true on first etcd host
+ lineinfile:
+ line: "ETCD_FORCE_NEW_CLUSTER=true"
+ dest: /etc/etcd/etcd.conf
+ state: absent
+ backup: true
+
+- name: Restart first etcd host
+ systemd:
+ name: "{{ etcd_service }}"
+ state: restarted
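ETCD_FORCE_NEW_CLUSTER makes the member drop its old cluster membership and
advertise itself as a fresh one-member cluster, which is why the flag must be
removed and the service restarted once the member reports healthy. A hedged
sketch of reaching these tasks through the backup.force_new_cluster wrapper
added later in this change (the group name and backup tag are hypothetical):

    - hosts: my_first_etcd
      tasks:
        - include_role:
            name: etcd
            tasks_from: backup.force_new_cluster
          vars:
            r_etcd_common_backup_tag: pre-migrate
            r_etcd_common_backup_sufix_name: ""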
diff --git a/roles/etcd/tasks/backup.archive.yml b/roles/etcd/tasks/backup.archive.yml
new file mode 100644
index 000000000..6daa6dc51
--- /dev/null
+++ b/roles/etcd/tasks/backup.archive.yml
@@ -0,0 +1,3 @@
+---
+- include: backup/vars.yml
+- include: backup/archive.yml
diff --git a/roles/etcd/tasks/backup.copy.yml b/roles/etcd/tasks/backup.copy.yml
new file mode 100644
index 000000000..cc540cbca
--- /dev/null
+++ b/roles/etcd/tasks/backup.copy.yml
@@ -0,0 +1,3 @@
+---
+- include: backup/vars.yml
+- include: backup/copy.yml
diff --git a/roles/etcd/tasks/backup.fetch.yml b/roles/etcd/tasks/backup.fetch.yml
new file mode 100644
index 000000000..26ec15043
--- /dev/null
+++ b/roles/etcd/tasks/backup.fetch.yml
@@ -0,0 +1,3 @@
+---
+- include: backup/vars.yml
+- include: backup/fetch.yml
diff --git a/roles/etcd/tasks/backup.force_new_cluster.yml b/roles/etcd/tasks/backup.force_new_cluster.yml
new file mode 100644
index 000000000..d2e866416
--- /dev/null
+++ b/roles/etcd/tasks/backup.force_new_cluster.yml
@@ -0,0 +1,12 @@
+---
+- include: backup/vars.yml
+
+- name: Move content of etcd backup under the etcd data directory
+ command: >
+ mv "{{ l_etcd_backup_dir }}/member" "{{ etcd_data_dir }}"
+
+- name: Set etcd group for the etcd data directory
+ command: >
+ chown -R etcd:etcd "{{ etcd_data_dir }}"
+
+- include: auxiliary/force_new_cluster.yml
diff --git a/roles/etcd/tasks/backup.unarchive.yml b/roles/etcd/tasks/backup.unarchive.yml
new file mode 100644
index 000000000..77a637360
--- /dev/null
+++ b/roles/etcd/tasks/backup.unarchive.yml
@@ -0,0 +1,3 @@
+---
+- include: backup/vars.yml
+- include: backup/unarchive.yml
diff --git a/roles/etcd/tasks/backup/archive.yml b/roles/etcd/tasks/backup/archive.yml
new file mode 100644
index 000000000..f6aa68a6e
--- /dev/null
+++ b/roles/etcd/tasks/backup/archive.yml
@@ -0,0 +1,5 @@
+---
+- name: Archive backup
+ archive:
+ path: "{{ l_etcd_backup_dir }}"
+ dest: "{{ l_etcd_backup_dir }}.tgz"
diff --git a/roles/etcd/tasks/backup/backup.yml b/roles/etcd/tasks/backup/backup.yml
index 42d27c081..ca0d29155 100644
--- a/roles/etcd/tasks/backup/backup.yml
+++ b/roles/etcd/tasks/backup/backup.yml
@@ -1,25 +1,9 @@
---
-# set the etcd backup directory name here in case the tag or sufix consists of dynamic value that changes over time
-# e.g. openshift-backup-{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }} value will change every second so if the date changes
-# right after setting l_etcd_incontainer_backup_dir and before l_etcd_backup_dir facts, the backup directory name is different
-- set_fact:
- l_backup_dir_name: "openshift-backup-{{ r_etcd_common_backup_tag }}{{ r_etcd_common_backup_sufix_name }}"
-
-- set_fact:
- l_etcd_data_dir: "{{ etcd_data_dir }}{{ '/etcd.etcd' if r_etcd_common_etcd_runtime == 'runc' else '' }}"
-
-- set_fact:
- l_etcd_incontainer_data_dir: "{{ etcd_data_dir }}"
-
-- set_fact:
- l_etcd_incontainer_backup_dir: "{{ l_etcd_incontainer_data_dir }}/{{ l_backup_dir_name }}"
-
-- set_fact:
- l_etcd_backup_dir: "{{ l_etcd_data_dir }}/{{ l_backup_dir_name }}"
+- include: vars.yml
# TODO: replace shell module with command and update later checks
- name: Check available disk space for etcd backup
- shell: df --output=avail -k {{ l_etcd_data_dir }} | tail -n 1
+ shell: df --output=avail -k {{ etcd_data_dir }} | tail -n 1
register: l_avail_disk
# AUDIT:changed_when: `false` because we are only inspecting
# state, not manipulating anything
@@ -27,7 +11,7 @@
# TODO: replace shell module with command and update later checks
- name: Check current etcd disk usage
- shell: du --exclude='*openshift-backup*' -k {{ l_etcd_data_dir }} | tail -n 1 | cut -f1
+ shell: du --exclude='*openshift-backup*' -k {{ etcd_data_dir }} | tail -n 1 | cut -f1
register: l_etcd_disk_usage
# AUDIT:changed_when: `false` because we are only inspecting
# state, not manipulating anything
@@ -60,17 +44,17 @@
- r_etcd_common_embedded_etcd | bool
- not l_ostree_booted.stat.exists | bool
-- name: Check selinux label of '{{ l_etcd_data_dir }}'
+- name: Check selinux label of '{{ etcd_data_dir }}'
command: >
- stat -c '%C' {{ l_etcd_data_dir }}
+ stat -c '%C' {{ etcd_data_dir }}
register: l_etcd_selinux_labels
- debug:
msg: "{{ l_etcd_selinux_labels }}"
-- name: Make sure the '{{ l_etcd_data_dir }}' has the proper label
+- name: Make sure the '{{ etcd_data_dir }}' has the proper label
command: >
- chcon -t svirt_sandbox_file_t "{{ l_etcd_data_dir }}"
+ chcon -t svirt_sandbox_file_t "{{ etcd_data_dir }}"
when:
- l_etcd_selinux_labels.rc == 0
- "'svirt_sandbox_file_t' not in l_etcd_selinux_labels.stdout"
@@ -84,12 +68,12 @@
# https://github.com/openshift/openshift-docs/commit/b38042de02d9780842dce95cfa0ef45d53b58bc6
- name: Check for v3 data store
stat:
- path: "{{ l_etcd_data_dir }}/member/snap/db"
+ path: "{{ etcd_data_dir }}/member/snap/db"
register: l_v3_db
- name: Copy etcd v3 data store
command: >
- cp -a {{ l_etcd_data_dir }}/member/snap/db
+ cp -a {{ etcd_data_dir }}/member/snap/db
{{ l_etcd_backup_dir }}/member/snap/
when: l_v3_db.stat.exists
diff --git a/roles/etcd/tasks/backup/copy.yml b/roles/etcd/tasks/backup/copy.yml
new file mode 100644
index 000000000..967e5ee66
--- /dev/null
+++ b/roles/etcd/tasks/backup/copy.yml
@@ -0,0 +1,5 @@
+---
+- name: Copy etcd backup
+ copy:
+ src: "{{ etcd_backup_sync_directory }}/{{ l_backup_dir_name }}.tgz"
+ dest: "{{ etcd_data_dir }}"
diff --git a/roles/etcd/tasks/backup/fetch.yml b/roles/etcd/tasks/backup/fetch.yml
new file mode 100644
index 000000000..610ce1960
--- /dev/null
+++ b/roles/etcd/tasks/backup/fetch.yml
@@ -0,0 +1,8 @@
+---
+- name: Fetch etcd backup
+ fetch:
+ src: "{{ l_etcd_backup_dir }}.tgz"
+ dest: "{{ etcd_backup_sync_directory }}/"
+ flat: yes
+ fail_on_missing: yes
+ validate_checksum: yes
diff --git a/roles/etcd/tasks/backup/unarchive.yml b/roles/etcd/tasks/backup/unarchive.yml
new file mode 100644
index 000000000..a85f533c2
--- /dev/null
+++ b/roles/etcd/tasks/backup/unarchive.yml
@@ -0,0 +1,14 @@
+---
+- shell: ls /var/lib/etcd
+ register: output
+
+- debug:
+ msg: "output: {{ output }}"
+
+- name: Unarchive backup
+ # can't use unarchive https://github.com/ansible/ansible/issues/30821
+ # unarchive:
+ # src: "{{ l_etcd_backup_dir }}.tgz"
+ # dest: "{{ l_etcd_backup_dir }}"
+ command: >
+ tar -xf "{{ l_etcd_backup_dir }}.tgz" -C "{{ etcd_data_dir }}"
diff --git a/roles/etcd/tasks/backup/vars.yml b/roles/etcd/tasks/backup/vars.yml
new file mode 100644
index 000000000..3ffa641b3
--- /dev/null
+++ b/roles/etcd/tasks/backup/vars.yml
@@ -0,0 +1,15 @@
+---
+# Set the etcd backup directory name here in case the tag or suffix contains a dynamic value that changes over time.
+# e.g. an openshift-backup-{{ lookup('pipe', 'date +%Y%m%d%H%M%S') }} value changes every second, so if the date changed
+# between setting the l_etcd_incontainer_backup_dir and l_etcd_backup_dir facts, the two backup directory names would differ.
+- set_fact:
+ l_backup_dir_name: "openshift-backup-{{ r_etcd_common_backup_tag }}{{ r_etcd_common_backup_sufix_name }}"
+
+- set_fact:
+ l_etcd_incontainer_data_dir: "{{ etcd_data_dir }}"
+
+- set_fact:
+ l_etcd_incontainer_backup_dir: "{{ l_etcd_incontainer_data_dir }}/{{ l_backup_dir_name }}"
+
+- set_fact:
+ l_etcd_backup_dir: "{{ etcd_data_dir }}/{{ l_backup_dir_name }}"
diff --git a/roles/etcd/tasks/backup_master_etcd_certificates.yml b/roles/etcd/tasks/backup_master_etcd_certificates.yml
new file mode 100644
index 000000000..129e1831c
--- /dev/null
+++ b/roles/etcd/tasks/backup_master_etcd_certificates.yml
@@ -0,0 +1,2 @@
+---
+- include: certificates/backup_master_etcd_certificates.yml
diff --git a/roles/etcd/tasks/certificates/backup_master_etcd_certificates.yml b/roles/etcd/tasks/certificates/backup_master_etcd_certificates.yml
new file mode 100644
index 000000000..e65b3e5a2
--- /dev/null
+++ b/roles/etcd/tasks/certificates/backup_master_etcd_certificates.yml
@@ -0,0 +1,7 @@
+---
+- name: Backup master etcd certificates
+ shell: >
+ tar -czvf /etc/origin/master/master-etcd-certificate-backup-{{ ansible_date_time.epoch }}.tgz
+ /etc/origin/master/master.etcd-*
+ args:
+ warn: no
diff --git a/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml
index 26492fb3c..00b8f4a0b 100644
--- a/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml
+++ b/roles/etcd/tasks/certificates/fetch_server_certificates_from_ca.yml
@@ -12,9 +12,6 @@
- "{{ etcd_cert_config_dir }}/{{ etcd_cert_prefix }}server.crt"
- "{{ etcd_cert_config_dir }}/{{ etcd_cert_prefix }}peer.crt"
- "{{ etcd_cert_config_dir }}/{{ etcd_cert_prefix }}ca.crt"
- - "{{ etcd_system_container_cert_config_dir }}/{{ etcd_cert_prefix }}server.crt"
- - "{{ etcd_system_container_cert_config_dir }}/{{ etcd_cert_prefix }}peer.crt"
- - "{{ etcd_system_container_cert_config_dir }}/{{ etcd_cert_prefix }}ca.crt"
register: g_etcd_server_cert_stat_result
when: not etcd_certificates_redeploy | default(false) | bool
@@ -141,7 +138,6 @@
state: directory
with_items:
- "{{ etcd_cert_config_dir }}"
- - "{{ etcd_system_container_cert_config_dir }}"
when: etcd_server_certs_missing | bool
- name: Unarchive cert tarball
@@ -176,25 +172,8 @@
state: directory
with_items:
- "{{ etcd_ca_dir }}"
- - "{{ etcd_system_container_cert_config_dir }}/ca"
when: etcd_server_certs_missing | bool
-- name: Unarchive cert tarball for the system container
- unarchive:
- src: "{{ g_etcd_server_mktemp.stdout }}/{{ etcd_cert_subdir }}.tgz"
- dest: "{{ etcd_system_container_cert_config_dir }}"
- when:
- - etcd_server_certs_missing | bool
- - r_etcd_common_etcd_runtime == 'runc'
-
-- name: Unarchive etcd ca cert tarballs for the system container
- unarchive:
- src: "{{ g_etcd_server_mktemp.stdout }}/{{ etcd_ca_name }}.tgz"
- dest: "{{ etcd_system_container_cert_config_dir }}/ca"
- when:
- - etcd_server_certs_missing | bool
- - r_etcd_common_etcd_runtime == 'runc'
-
- name: Delete temporary directory
local_action: file path="{{ g_etcd_server_mktemp.stdout }}" state=absent
become: no
diff --git a/roles/etcd/tasks/check_cluster_health.yml b/roles/etcd/tasks/check_cluster_health.yml
new file mode 100644
index 000000000..75c110972
--- /dev/null
+++ b/roles/etcd/tasks/check_cluster_health.yml
@@ -0,0 +1,2 @@
+---
+- include: migration/check_cluster_health.yml
diff --git a/roles/etcd/tasks/disable_etcd.yml b/roles/etcd/tasks/disable_etcd.yml
new file mode 100644
index 000000000..9202e6e48
--- /dev/null
+++ b/roles/etcd/tasks/disable_etcd.yml
@@ -0,0 +1,2 @@
+---
+- include: auxiliary/disable_etcd.yml
diff --git a/roles/etcd/tasks/fetch_backup.yml b/roles/etcd/tasks/fetch_backup.yml
new file mode 100644
index 000000000..513eed17a
--- /dev/null
+++ b/roles/etcd/tasks/fetch_backup.yml
@@ -0,0 +1,8 @@
+---
+- include: backup/vars.yml
+
+- include: backup/archive.yml
+
+- include: backup/sync_backup.yml
+
+- include: backup/
diff --git a/roles/etcd/tasks/migration/check.yml b/roles/etcd/tasks/migration/check.yml
index 0804d9e1c..5c45e5ae1 100644
--- a/roles/etcd/tasks/migration/check.yml
+++ b/roles/etcd/tasks/migration/check.yml
@@ -3,6 +3,17 @@
# Check the cluster is healthy
- include: check_cluster_health.yml
+# Check if there is at least one v2 snapshot
+- name: Check if there is at least one v2 snapshot
+ find:
+ paths: "{{ etcd_data_dir }}/member/snap"
+ patterns: '*.snap'
+ register: snapshots_result
+
+- fail:
+ msg: "Before the migration can proceed the etcd member must write down at least one snapshot under {{ etcd_data_dir }}/member/snap directory."
+ when: snapshots_result.matched | int == 0
+
# Check if the member has v3 data already
# Run the migration only if the data are v2
- name: Check if there are any v3 data
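
The new pre-flight check fails fast when no v2 snapshot exists yet. A hedged sketch of the same verification done ad hoc, using the path the `find` task above searches:

```
- name: List v2 snapshot files on this member (illustrative)
  command: ls -1 {{ etcd_data_dir }}/member/snap
  register: snap_listing
  changed_when: false

- name: Show any *.snap files found
  debug:
    msg: "{{ snap_listing.stdout_lines | select('match', '.*\\.snap$') | list }}"
```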
diff --git a/roles/etcd/tasks/system_container.yml b/roles/etcd/tasks/system_container.yml
index e735bf50a..f71d9b551 100644
--- a/roles/etcd/tasks/system_container.yml
+++ b/roles/etcd/tasks/system_container.yml
@@ -1,6 +1,8 @@
---
-- set_fact:
- l_etcd_src_data_dir: "{{ '/var/lib/origin/openshift.local.etcd' if r_etcd_common_embedded_etcd | bool else '/var/lib/etcd/' }}"
+- name: Ensure proxies are in the atomic.conf
+ include_role:
+ name: openshift_atomic
+ tasks_from: proxy
- name: Pull etcd system container
command: atomic pull --storage=ostree {{ openshift.etcd.etcd_image }}
@@ -17,6 +19,7 @@
{{ hostvars[host].etcd_hostname }}={{ etcd_peer_url_scheme }}://{{ hostvars[host].etcd_ip }}:{{ etcd_peer_port }},
{%- endif -%}
{% endfor -%}
+ when: etcd_initial_cluster is undefined
- name: Check etcd system container package
command: >
@@ -51,36 +54,13 @@
- name: Systemd reload configuration
systemd: name=etcd_container daemon_reload=yes
-- name: Check for previous etcd data store
- stat:
- path: "{{ l_etcd_src_data_dir }}/member/"
- register: src_datastore
-
-- name: Check for etcd system container data store
- stat:
- path: "{{ r_etcd_common_system_container_host_dir }}/etcd.etcd/member"
- register: dest_datastore
-
-- name: Ensure that etcd system container data dirs exist
- file: path="{{ item }}" state=directory
- with_items:
- - "{{ r_etcd_common_system_container_host_dir }}/etc"
- - "{{ r_etcd_common_system_container_host_dir }}/etcd.etcd"
-
-- name: Copy etcd data store
- command: >
- cp -a {{ l_etcd_src_data_dir }}/member
- {{ r_etcd_common_system_container_host_dir }}/etcd.etcd/member
- when:
- - src_datastore.stat.exists
- - not dest_datastore.stat.exists
-
- name: Install or Update Etcd system container package
oc_atomic_container:
name: etcd
image: "{{ openshift.etcd.etcd_image }}"
state: latest
values:
+ - ETCD_DATA_DIR=/var/lib/etcd
- ETCD_LISTEN_PEER_URLS={{ etcd_listen_peer_urls }}
- ETCD_NAME={{ etcd_hostname }}
- ETCD_INITIAL_CLUSTER={{ etcd_initial_cluster }}
@@ -89,11 +69,21 @@
- ETCD_INITIAL_CLUSTER_STATE={{ etcd_initial_cluster_state }}
- ETCD_INITIAL_CLUSTER_TOKEN={{ etcd_initial_cluster_token }}
- ETCD_ADVERTISE_CLIENT_URLS={{ etcd_advertise_client_urls }}
- - ETCD_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt
- - ETCD_CERT_FILE={{ etcd_system_container_conf_dir }}/server.crt
- - ETCD_KEY_FILE={{ etcd_system_container_conf_dir }}/server.key
- - ETCD_PEER_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt
- - ETCD_PEER_CERT_FILE={{ etcd_system_container_conf_dir }}/peer.crt
- - ETCD_PEER_KEY_FILE={{ etcd_system_container_conf_dir }}/peer.key
- - ETCD_TRUSTED_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt
- - ETCD_PEER_TRUSTED_CA_FILE={{ etcd_system_container_conf_dir }}/ca.crt
+ - ETCD_CA_FILE={{ etcd_ca_file }}
+ - ETCD_CERT_FILE={{ etcd_cert_file }}
+ - ETCD_KEY_FILE={{ etcd_key_file }}
+ - ETCD_PEER_CA_FILE={{ etcd_peer_ca_file }}
+ - ETCD_PEER_CERT_FILE={{ etcd_peer_cert_file }}
+ - ETCD_PEER_KEY_FILE={{ etcd_peer_key_file }}
+ - ETCD_TRUSTED_CA_FILE={{ etcd_ca_file }}
+ - ETCD_PEER_TRUSTED_CA_FILE={{ etcd_peer_ca_file }}
+ - 'ADDTL_MOUNTS=,{"type":"bind","source":"/etc/","destination":"/etc/","options":["rbind","rw","rslave"]},{"type":"bind","source":"/var/lib/etcd","destination":"/var/lib/etcd/","options":["rbind","rw","rslave"]}'
+
+- name: Ensure etcd datadir ownership for the system container
+ file:
+ path: "{{ etcd_data_dir }}"
+ state: directory
+ mode: 0700
+ owner: root
+ group: root
+ recurse: True
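
Because the cluster-membership task above is now skipped when `etcd_initial_cluster` is already set, an inventory can pin the membership explicitly. A hypothetical `group_vars` entry (hostnames and IPs invented):

```
etcd_initial_cluster: >-
  master-0.example.com=https://192.0.2.10:2380,master-1.example.com=https://192.0.2.11:2380
```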
diff --git a/roles/etcd/templates/etcd.conf.j2 b/roles/etcd/templates/etcd.conf.j2
index 8462bb4c8..3027a9447 100644
--- a/roles/etcd/templates/etcd.conf.j2
+++ b/roles/etcd/templates/etcd.conf.j2
@@ -29,8 +29,8 @@ ETCD_INITIAL_CLUSTER={{ etcd_hostname}}={{ etcd_initial_advertise_peer_urls }}
ETCD_INITIAL_CLUSTER_STATE={{ etcd_initial_cluster_state }}
ETCD_INITIAL_CLUSTER_TOKEN=thirdparty-etcd-cluster-1
{% else %}
-{% if initial_etcd_cluster is defined and initial_etcd_cluster %}
-ETCD_INITIAL_CLUSTER={{ initial_etcd_cluster }}
+{% if etcd_initial_cluster is defined and etcd_initial_cluster %}
+ETCD_INITIAL_CLUSTER={{ etcd_initial_cluster }}
{% else %}
ETCD_INITIAL_CLUSTER={{ initial_cluster() }}
{% endif %}
diff --git a/roles/flannel/handlers/main.yml b/roles/flannel/handlers/main.yml
index 02f5a5f64..889069485 100644
--- a/roles/flannel/handlers/main.yml
+++ b/roles/flannel/handlers/main.yml
@@ -12,3 +12,12 @@
until: not l_docker_restart_docker_in_flannel_result | failed
retries: 3
delay: 30
+
+- name: restart node
+ systemd:
+ name: "{{ openshift.common.service_type }}-node"
+ state: restarted
+ register: l_restart_node_result
+ until: not l_restart_node_result | failed
+ retries: 3
+ delay: 30
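
The new `restart node` handler only fires when notified. An illustrative task that would trigger it (the template name is hypothetical):

```
- name: Update flannel sysconfig
  template:
    src: flanneld.j2          # hypothetical template
    dest: /etc/sysconfig/flanneld
  notify:
  - restart node
```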
diff --git a/roles/installer_checkpoint/README.md b/roles/installer_checkpoint/README.md
index 321acca21..83e00e504 100644
--- a/roles/installer_checkpoint/README.md
+++ b/roles/installer_checkpoint/README.md
@@ -92,8 +92,7 @@ phase/component and then a final play for setting `installer_phase_initialize` to
# common/openshift-cluster/std_include.yml
---
- name: Initialization Checkpoint Start
- hosts: localhost
- connection: local
+ hosts: oo_all_hosts
gather_facts: false
roles:
- installer_checkpoint
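
For context, each phase reports its status to this callback through custom stats. A hedged sketch of the companion "End" play (pattern inferred from the phase keys used by the plugin; details may differ):

```
- name: Initialization Checkpoint End
  hosts: oo_all_hosts
  gather_facts: false
  tasks:
  - name: Set Initialization phase to Complete
    run_once: true
    set_stats:
      data:
        installer_phase_initialize: "Complete"
      aggregate: false
```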
diff --git a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
index 033240e62..b17358882 100644
--- a/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
+++ b/roles/installer_checkpoint/callback_plugins/installer_checkpoint.py
@@ -31,6 +31,7 @@ GlusterFS Install : Not Started
Hosted Install : Complete
Metrics Install : Not Started
Logging Install : Not Started
+Prometheus Install : Not Started
Service Catalog Install : Not Started
-----------------------------------------------------
@@ -49,6 +50,7 @@ GlusterFS Install : Not Started
Hosted Install : Not Started
Metrics Install : Not Started
Logging Install : Not Started
+Prometheus Install : Not Started
Service Catalog Install : Not Started
'''
@@ -70,6 +72,7 @@ class CallbackModule(CallbackBase):
# Set the order of the installer phases
installer_phases = [
'installer_phase_initialize',
+ 'installer_phase_health',
'installer_phase_etcd',
'installer_phase_nfs',
'installer_phase_loadbalancer',
@@ -80,7 +83,9 @@ class CallbackModule(CallbackBase):
'installer_phase_hosted',
'installer_phase_metrics',
'installer_phase_logging',
+ 'installer_phase_prometheus',
'installer_phase_servicecatalog',
+ 'installer_phase_management',
]
# Define the attributes of the installer phases
@@ -89,6 +94,10 @@ class CallbackModule(CallbackBase):
'title': 'Initialization',
'playbook': ''
},
+ 'installer_phase_health': {
+ 'title': 'Health Check',
+ 'playbook': 'playbooks/byo/openshift-checks/pre-install.yml'
+ },
'installer_phase_etcd': {
'title': 'etcd Install',
'playbook': 'playbooks/byo/openshift-etcd/config.yml'
@@ -129,10 +138,18 @@ class CallbackModule(CallbackBase):
'title': 'Logging Install',
'playbook': 'playbooks/byo/openshift-cluster/openshift-logging.yml'
},
+ 'installer_phase_prometheus': {
+ 'title': 'Prometheus Install',
+ 'playbook': 'playbooks/byo/openshift-cluster/openshift-prometheus.yml'
+ },
'installer_phase_servicecatalog': {
'title': 'Service Catalog Install',
'playbook': 'playbooks/byo/openshift-cluster/service-catalog.yml'
},
+ 'installer_phase_management': {
+ 'title': 'Management Install',
+ 'playbook': 'playbooks/byo/openshift-management/config.yml'
+ },
}
# Find the longest phase title
@@ -154,11 +171,6 @@ class CallbackModule(CallbackBase):
self._display.display(
'\tThis phase can be restarted by running: {}'.format(
phase_attributes[phase]['playbook']))
- else:
- # Phase was not found in custom stats
- self._display.display(
- '{}{}: {}'.format(phase_title, ' ' * padding, 'Not Started'),
- color=C.COLOR_SKIP)
self._display.display("", screen_only=True)
diff --git a/roles/kuryr/README.md b/roles/kuryr/README.md
new file mode 100644
index 000000000..7b618f902
--- /dev/null
+++ b/roles/kuryr/README.md
@@ -0,0 +1,38 @@
+## OpenStack Kuryr
+
+Install Kuryr CNI components (kuryr-controller, kuryr-cni) on master and worker
+nodes. Kuryr uses the OpenStack Networking service (Neutron) to provide networking
+for pods, which gives pods and OpenStack VMs direct interconnectivity.
+
+## Requirements
+
+* Ansible 2.2+
+* CentOS/RHEL 7.3+
+
+## Current Kuryr restrictions when used with OpenShift
+
+* OpenShift Origin only
+* OpenShift on OpenStack Newton or newer (only with Trunk ports)
+
+## Key Ansible inventory Kuryr master configuration parameters
+
+* ``openshift_use_kuryr=True``
+* ``openshift_use_openshift_sdn=False``
+* ``openshift_sdn_network_plugin_name='cni'``
+* ``kuryr_cni_link_interface=eth0``
+* ``kuryr_openstack_auth_url=keystone_url``
+* ``kuryr_openstack_user_domain_name=Default``
+* ``kuryr_openstack_user_project_name=Default``
+* ``kuryr_openstack_project_id=project_uuid``
+* ``kuryr_openstack_username=kuryr``
+* ``kuryr_openstack_password=kuryr_pass``
+* ``kuryr_openstack_pod_sg_id=pod_security_group_uuid``
+* ``kuryr_openstack_pod_subnet_id=pod_subnet_uuid``
+* ``kuryr_openstack_service_subnet_id=service_subnet_uuid``
+* ``kuryr_openstack_pod_project_id=pod_project_uuid``
+* ``kuryr_openstack_worker_nodes_subnet_id=worker_nodes_subnet_uuid``
+
+## Kuryr resources
+
+* [Kuryr documentation](https://docs.openstack.org/kuryr-kubernetes/latest/)
+* [Installing Kuryr containerized](https://docs.openstack.org/kuryr-kubernetes/latest/installation/containerized.html)
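
A hedged `group_vars` fragment wiring together the parameters listed above (every UUID, URL, and credential here is a placeholder):

```
openshift_use_kuryr: True
openshift_use_openshift_sdn: False
openshift_sdn_network_plugin_name: cni
kuryr_cni_link_interface: eth0
kuryr_openstack_auth_url: http://keystone.example.com:5000/v3
kuryr_openstack_username: kuryr
kuryr_openstack_password: kuryr_pass
kuryr_openstack_project_id: 1df0c4fb-4670-4c9e-b5c8-8011b18c7e5a
kuryr_openstack_pod_sg_id: 8bf4c6f1-7fd4-4b5a-8cf3-6a15a1b2c3d4
kuryr_openstack_pod_subnet_id: 9f468b83-6b01-4c4f-b966-7a815b9a2f23
kuryr_openstack_service_subnet_id: 5e2c1a9d-3f4b-4c6d-8e7f-0a1b2c3d4e5f
kuryr_openstack_worker_nodes_subnet_id: 2a0e2f3b-1c4d-4e5f-9a6b-7c8d9e0f1a2b
```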
diff --git a/roles/kuryr/defaults/main.yaml b/roles/kuryr/defaults/main.yaml
new file mode 100644
index 000000000..af05d80df
--- /dev/null
+++ b/roles/kuryr/defaults/main.yaml
@@ -0,0 +1,72 @@
+---
+# Kuryr conf directory
+kuryr_config_dir: /etc/kuryr
+
+# Kuryr username
+kuryr_openstack_username: kuryr
+
+# Kuryr domain name or id containing user
+kuryr_openstack_user_domain_name: default
+
+# Kuryr domain name or id containing project
+kuryr_openstack_project_domain_name: default
+
+# Kuryr OpenShift namespace
+kuryr_namespace: kube-system
+
+# Whether to run the cni plugin in debug mode
+kuryr_cni_debug: "false"
+
+# The version of cni binaries
+cni_version: v0.5.2
+
+# Path to the bin dir (where kuryr executables are installed)
+bin_dir: /usr/bin
+
+# Path to the cni binaries
+cni_bin_dir: /opt/cni/bin
+
+# URL for cni binaries
+cni_bin_url_base: "https://github.com/containernetworking/cni/releases/download/"
+cni_bin_url: "{{ cni_bin_url_base }}/{{ cni_version }}/cni-{{ cni_version }}.tgz"
+cni_bin_checksum: "71f411080245aa14d0cc06f6824e8039607dd9e9"
+
+# Kuryr ClusterRole definition
+kuryr_clusterrole:
+ name: kuryrctl
+ state: present
+ rules:
+ - apiGroups:
+ - ""
+ attributeRestrictions: null
+ verbs:
+ - get
+ - list
+ - watch
+ resources:
+ - daemonsets
+ - deployments
+ - deploymentconfigs
+ - endpoints
+ - ingress
+ - nodes
+ - namespaces
+ - pods
+ - projects
+ - routes
+ - services
+ - apiGroups:
+ - ""
+ attributeRestrictions: null
+ verbs:
+ - update
+ - patch
+ resources:
+ - endpoints
+ - ingress
+ - pods
+ - namespaces
+ - nodes
+ - services
+ - services/status
+ - routes
diff --git a/roles/kuryr/meta/main.yml b/roles/kuryr/meta/main.yml
new file mode 100644
index 000000000..7fd5adf41
--- /dev/null
+++ b/roles/kuryr/meta/main.yml
@@ -0,0 +1,17 @@
+---
+galaxy_info:
+ author: Red Hat
+ description: Kuryr networking
+ company: Red Hat
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+ - system
+dependencies:
+- { role: lib_openshift }
+- { role: openshift_facts }
diff --git a/roles/kuryr/tasks/master.yaml b/roles/kuryr/tasks/master.yaml
new file mode 100644
index 000000000..55ab16f74
--- /dev/null
+++ b/roles/kuryr/tasks/master.yaml
@@ -0,0 +1,52 @@
+---
+- name: Perform OpenShift ServiceAccount config
+ include: serviceaccount.yaml
+
+- name: Create kuryr manifests tempdir
+ command: mktemp -d
+ register: manifests_tmpdir
+
+- name: Create kuryr ConfigMap manifest
+ become: yes
+ template:
+ src: configmap.yaml.j2
+ dest: "{{ manifests_tmpdir.stdout }}/configmap.yaml"
+
+- name: Create kuryr-controller Deployment manifest
+ become: yes
+ template:
+ src: controller-deployment.yaml.j2
+ dest: "{{ manifests_tmpdir.stdout }}/controller-deployment.yaml"
+
+- name: Create kuryr-cni DaemonSet manifest
+ become: yes
+ template:
+ src: cni-daemonset.yaml.j2
+ dest: "{{ manifests_tmpdir.stdout }}/cni-daemonset.yaml"
+
+- name: Apply ConfigMap manifest
+ oc_obj:
+ state: present
+ kind: ConfigMap
+ name: "kuryr-config"
+ namespace: "{{ kuryr_namespace }}"
+ files:
+ - "{{ manifests_tmpdir.stdout }}/configmap.yaml"
+
+- name: Apply Controller Deployment manifest
+ oc_obj:
+ state: present
+ kind: Deployment
+ name: "kuryr-controller"
+ namespace: "{{ kuryr_namespace }}"
+ files:
+ - "{{ manifests_tmpdir.stdout }}/controller-deployment.yaml"
+
+- name: Apply kuryr-cni DaemonSet manifest
+ oc_obj:
+ state: present
+ kind: DaemonSet
+ name: "kuryr-cni-ds"
+ namespace: "{{ kuryr_namespace }}"
+ files:
+ - "{{ manifests_tmpdir.stdout }}/cni-daemonset.yaml"
diff --git a/roles/kuryr/tasks/node.yaml b/roles/kuryr/tasks/node.yaml
new file mode 100644
index 000000000..ffe814713
--- /dev/null
+++ b/roles/kuryr/tasks/node.yaml
@@ -0,0 +1,48 @@
+---
+- name: Create CNI bin directory
+ file:
+ state: directory
+ path: "{{ cni_bin_dir }}"
+ mode: 0755
+ owner: root
+ group: root
+ recurse: yes
+
+- name: Create CNI extraction tempdir
+ command: mktemp -d
+ register: cni_tmpdir
+
+- name: Download CNI
+ get_url:
+ url: "{{ cni_bin_url }}"
+ checksum: "sha1:{{ cni_bin_checksum }}"
+ mode: 0644
+ dest: "{{ cni_tmpdir.stdout }}"
+ register: downloaded_tarball
+
+- name: Extract CNI
+ become: yes
+ unarchive:
+ remote_src: True
+ src: "{{ downloaded_tarball.dest }}"
+ dest: "{{ cni_bin_dir }}"
+ when: downloaded_tarball.changed
+
+- name: Ensure CNI net.d exists
+ file:
+ path: /etc/cni/net.d
+ recurse: yes
+ state: directory
+
+- name: Configure OpenShift node with disabled service proxy
+ lineinfile:
+ dest: "/etc/sysconfig/{{ openshift.common.service_type }}-node"
+ regexp: '^OPTIONS="?(.*?)"?$'
+ backrefs: yes
+ backup: yes
+ line: 'OPTIONS="\1 --disable dns,proxy,plugins"'
+
+- name: Force node restart to disable the proxy
+ service:
+ name: "{{ openshift.common.service_type }}-node"
+ state: restarted
diff --git a/roles/kuryr/tasks/serviceaccount.yaml b/roles/kuryr/tasks/serviceaccount.yaml
new file mode 100644
index 000000000..088f13091
--- /dev/null
+++ b/roles/kuryr/tasks/serviceaccount.yaml
@@ -0,0 +1,31 @@
+---
+- name: Create Controller service account
+ oc_serviceaccount:
+ name: kuryr-controller
+ namespace: "{{ kuryr_namespace }}"
+ register: saout
+
+- name: Create a cluster role for Kuryr
+ oc_clusterrole: "{{ kuryr_clusterrole }}"
+
+- name: Fetch the created Kuryr controller cluster role
+ oc_clusterrole:
+ name: kuryrctl
+ state: list
+ register: crout
+
+- name: Grant Kuryr the privileged security context constraints
+ oc_adm_policy_user:
+ user: "system:serviceaccount:{{ kuryr_namespace }}:{{ saout.results.results.0.metadata.name }}"
+ namespace: "{{ kuryr_namespace }}"
+ resource_kind: scc
+ resource_name: privileged
+ state: present
+
+- name: Assign role to Kuryr service account
+ oc_adm_policy_user:
+ user: "system:serviceaccount:{{ kuryr_namespace }}:{{ saout.results.results.0.metadata.name }}"
+ namespace: "{{ kuryr_namespace }}"
+ resource_kind: cluster-role
+ resource_name: "{{ crout.results.results.metadata.name }}"
+ state: present
diff --git a/roles/kuryr/templates/cni-daemonset.yaml.j2 b/roles/kuryr/templates/cni-daemonset.yaml.j2
new file mode 100644
index 000000000..39348ae90
--- /dev/null
+++ b/roles/kuryr/templates/cni-daemonset.yaml.j2
@@ -0,0 +1,53 @@
+# More info about the template: https://docs.openstack.org/kuryr-kubernetes/latest/installation/containerized.html#generating-kuryr-resource-definitions-for-kubernetes
+
+apiVersion: extensions/v1beta1
+kind: DaemonSet
+metadata:
+ name: kuryr-cni-ds
+ namespace: {{ kuryr_namespace }}
+ labels:
+ tier: node
+ app: kuryr
+spec:
+ template:
+ metadata:
+ labels:
+ tier: node
+ app: kuryr
+ spec:
+ hostNetwork: true
+ tolerations:
+ - key: node-role.kubernetes.io/master
+ operator: Exists
+ effect: NoSchedule
+ serviceAccountName: kuryr-controller
+ containers:
+ - name: kuryr-cni
+ image: kuryr/cni:latest
+ imagePullPolicy: IfNotPresent
+ command: [ "cni_ds_init" ]
+ securityContext:
+ privileged: true
+ volumeMounts:
+ - name: bin
+ mountPath: /opt/cni/bin
+ - name: net-conf
+ mountPath: /etc/cni/net.d
+ - name: config-volume
+ mountPath: /tmp/kuryr/kuryr.conf
+ subPath: kuryr-cni.conf
+ - name: etc
+ mountPath: /etc
+ volumes:
+ - name: bin
+ hostPath:
+ path: {{ cni_bin_dir }}
+ - name: net-conf
+ hostPath:
+ path: /etc/cni/net.d
+ - name: config-volume
+ configMap:
+ name: kuryr-config
+ - name: etc
+ hostPath:
+ path: /etc
\ No newline at end of file
diff --git a/roles/kuryr/templates/configmap.yaml.j2 b/roles/kuryr/templates/configmap.yaml.j2
new file mode 100644
index 000000000..e874d6c25
--- /dev/null
+++ b/roles/kuryr/templates/configmap.yaml.j2
@@ -0,0 +1,343 @@
+# More info about the template: https://docs.openstack.org/kuryr-kubernetes/latest/installation/containerized.html#generating-kuryr-resource-definitions-for-kubernetes
+
+apiVersion: v1
+kind: ConfigMap
+metadata:
+ name: kuryr-config
+ namespace: {{ kuryr_namespace }}
+data:
+ kuryr.conf: |+
+ [DEFAULT]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Directory for Kuryr vif binding executables. (string value)
+ #bindir = /usr/libexec/kuryr
+
+ # If set to true, the logging level will be set to DEBUG instead of the default
+ # INFO level. (boolean value)
+ # Note: This option can be changed without restarting.
+ #debug = false
+
+ # DEPRECATED: If set to false, the logging level will be set to WARNING instead
+ # of the default INFO level. (boolean value)
+ # This option is deprecated for removal.
+ # Its value may be silently ignored in the future.
+ #verbose = true
+
+ # The name of a logging configuration file. This file is appended to any
+ # existing logging configuration files. For details about logging configuration
+ # files, see the Python logging module documentation. Note that when logging
+ # configuration files are used then all logging configuration is set in the
+ # configuration file and other logging configuration options are ignored (for
+ # example, logging_context_format_string). (string value)
+ # Note: This option can be changed without restarting.
+ # Deprecated group/name - [DEFAULT]/log_config
+ #log_config_append = <None>
+
+ # Defines the format string for %%(asctime)s in log records. Default:
+ # %(default)s . This option is ignored if log_config_append is set. (string
+ # value)
+ #log_date_format = %Y-%m-%d %H:%M:%S
+
+ # (Optional) Name of log file to send logging output to. If no default is set,
+ # logging will go to stderr as defined by use_stderr. This option is ignored if
+ # log_config_append is set. (string value)
+ # Deprecated group/name - [DEFAULT]/logfile
+ #log_file = /var/log/kuryr/kuryr-controller.log
+
+ # (Optional) The base directory used for relative log_file paths. This option
+ # is ignored if log_config_append is set. (string value)
+ # Deprecated group/name - [DEFAULT]/logdir
+ #log_dir = <None>
+
+ # Uses logging handler designed to watch file system. When log file is moved or
+ # removed this handler will open a new log file with specified path
+ # instantaneously. It makes sense only if log_file option is specified and
+ # Linux platform is used. This option is ignored if log_config_append is set.
+ # (boolean value)
+ #watch_log_file = false
+
+ # Use syslog for logging. Existing syslog format is DEPRECATED and will be
+ # changed later to honor RFC5424. This option is ignored if log_config_append
+ # is set. (boolean value)
+ #use_syslog = false
+
+ # Syslog facility to receive log lines. This option is ignored if
+ # log_config_append is set. (string value)
+ #syslog_log_facility = LOG_USER
+
+ # Log output to standard error. This option is ignored if log_config_append is
+ # set. (boolean value)
+ #use_stderr = true
+
+ # Format string to use for log messages with context. (string value)
+ #logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+ # Format string to use for log messages when context is undefined. (string
+ # value)
+ #logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+ # Additional data to append to log message when logging level for the message
+ # is DEBUG. (string value)
+ #logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+ # Prefix each line of exception output with this format. (string value)
+ #logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+ # Defines the format string for %(user_identity)s that is used in
+ # logging_context_format_string. (string value)
+ #logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+ # List of package logging levels in logger=LEVEL pairs. This option is ignored
+ # if log_config_append is set. (list value)
+ #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+ # Enables or disables publication of error events. (boolean value)
+ #publish_errors = false
+
+ # The format for an instance that is passed with the log message. (string
+ # value)
+ #instance_format = "[instance: %(uuid)s] "
+
+ # The format for an instance UUID that is passed with the log message. (string
+ # value)
+ #instance_uuid_format = "[instance: %(uuid)s] "
+
+ # Enables or disables fatal status of deprecations. (boolean value)
+ #fatal_deprecations = false
+
+
+ [binding]
+
+ driver = kuryr.lib.binding.drivers.vlan
+ link_iface = eth0
+
+ [kubernetes]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # The root URL of the Kubernetes API (string value)
+ api_root = {{ openshift.master.api_url }}
+
+ # Absolute path to client cert to connect to HTTPS K8S_API (string value)
+ # ssl_client_crt_file = /etc/kuryr/controller.crt
+
+ # Absolute path client key file to connect to HTTPS K8S_API (string value)
+ # ssl_client_key_file = /etc/kuryr/controller.key
+
+ # Absolute path to ca cert file to connect to HTTPS K8S_API (string value)
+ ssl_ca_crt_file = /var/run/secrets/kubernetes.io/serviceaccount/ca.crt
+
+ # The token to talk to the k8s API
+ token_file = /var/run/secrets/kubernetes.io/serviceaccount/token
+
+ # HTTPS K8S_API server identity verification (boolean value)
+ # TODO (apuimedo): Make configurable
+ ssl_verify_server_crt = True
+
+ # The driver to determine OpenStack project for pod ports (string value)
+ pod_project_driver = default
+
+ # The driver to determine OpenStack project for services (string value)
+ service_project_driver = default
+
+ # The driver to determine Neutron subnets for pod ports (string value)
+ pod_subnets_driver = default
+
+ # The driver to determine Neutron subnets for services (string value)
+ service_subnets_driver = default
+
+ # The driver to determine Neutron security groups for pods (string value)
+ pod_security_groups_driver = default
+
+ # The driver to determine Neutron security groups for services (string value)
+ service_security_groups_driver = default
+
+ # The driver that provides VIFs for Kubernetes Pods. (string value)
+ pod_vif_driver = nested-vlan
+
+
+ [neutron]
+ # Configuration options for OpenStack Neutron
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # Authentication URL (string value)
+ auth_url = {{ kuryr_openstack_auth_url }}
+
+ # Authentication type to load (string value)
+ # Deprecated group/name - [neutron]/auth_plugin
+ auth_type = password
+
+ # Domain ID to scope to (string value)
+ user_domain_name = {{ kuryr_openstack_user_domain_name }}
+
+ # User's password (string value)
+ password = {{ kuryr_openstack_password }}
+
+ # Domain name containing project (string value)
+ project_domain_name = {{ kuryr_openstack_project_domain_name }}
+
+ # Project ID to scope to (string value)
+ # Deprecated group/name - [neutron]/tenant-id
+ project_id = {{ kuryr_openstack_project_id }}
+
+ # Token (string value)
+ #token = <None>
+
+ # Trust ID (string value)
+ #trust_id = <None>
+
+ # User's domain id (string value)
+ #user_domain_id = <None>
+
+ # User id (string value)
+ #user_id = <None>
+
+ # Username (string value)
+ # Deprecated group/name - [neutron]/user-name
+ username = {{ kuryr_openstack_username }}
+
+ # Whether a plugging operation is failed if the port to plug does not become
+ # active (boolean value)
+ #vif_plugging_is_fatal = false
+
+ # Seconds to wait for port to become active (integer value)
+ #vif_plugging_timeout = 0
+
+ [neutron_defaults]
+
+ pod_security_groups = {{ kuryr_openstack_pod_sg_id }}
+ pod_subnet = {{ kuryr_openstack_pod_subnet_id }}
+ service_subnet = {{ kuryr_openstack_service_subnet_id }}
+ project = {{ kuryr_openstack_pod_project_id }}
+ # TODO (apuimedo): Remove the duplicated line just after this one once the
+ # RDO packaging contains the upstream patch
+ worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }}
+
+ [pod_vif_nested]
+ worker_nodes_subnet = {{ kuryr_openstack_worker_nodes_subnet_id }}
+ kuryr-cni.conf: |+
+ [DEFAULT]
+
+ #
+ # From kuryr_kubernetes
+ #
+ # If set to true, the logging level will be set to DEBUG instead of the default
+ # INFO level. (boolean value)
+ # Note: This option can be changed without restarting.
+ #debug = false
+
+ # The name of a logging configuration file. This file is appended to any
+ # existing logging configuration files. For details about logging configuration
+ # files, see the Python logging module documentation. Note that when logging
+ # configuration files are used then all logging configuration is set in the
+ # configuration file and other logging configuration options are ignored (for
+ # example, logging_context_format_string). (string value)
+ # Note: This option can be changed without restarting.
+ # Deprecated group/name - [DEFAULT]/log_config
+ #log_config_append = <None>
+
+ # Defines the format string for %%(asctime)s in log records. Default:
+ # %(default)s . This option is ignored if log_config_append is set. (string
+ # value)
+ #log_date_format = %Y-%m-%d %H:%M:%S
+
+ # (Optional) Name of log file to send logging output to. If no default is set,
+ # logging will go to stderr as defined by use_stderr. This option is ignored if
+ # log_config_append is set. (string value)
+ # Deprecated group/name - [DEFAULT]/logfile
+ #log_file = /var/log/kuryr/cni.log
+
+ # (Optional) The base directory used for relative log_file paths. This option
+ # is ignored if log_config_append is set. (string value)
+ # Deprecated group/name - [DEFAULT]/logdir
+ #log_dir = <None>
+
+ # Uses logging handler designed to watch file system. When log file is moved or
+ # removed this handler will open a new log file with specified path
+ # instantaneously. It makes sense only if log_file option is specified and
+ # Linux platform is used. This option is ignored if log_config_append is set.
+ # (boolean value)
+ #watch_log_file = false
+
+ # Use syslog for logging. Existing syslog format is DEPRECATED and will be
+ # changed later to honor RFC5424. This option is ignored if log_config_append
+ # is set. (boolean value)
+ #use_syslog = false
+
+ # Syslog facility to receive log lines. This option is ignored if
+ # log_config_append is set. (string value)
+ #syslog_log_facility = LOG_USER
+
+ # Log output to standard error. This option is ignored if log_config_append is
+ # set. (boolean value)
+ use_stderr = true
+
+ # Format string to use for log messages with context. (string value)
+ #logging_context_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [%(request_id)s %(user_identity)s] %(instance)s%(message)s
+
+ # Format string to use for log messages when context is undefined. (string
+ # value)
+ #logging_default_format_string = %(asctime)s.%(msecs)03d %(process)d %(levelname)s %(name)s [-] %(instance)s%(message)s
+
+ # Additional data to append to log message when logging level for the message
+ # is DEBUG. (string value)
+ #logging_debug_format_suffix = %(funcName)s %(pathname)s:%(lineno)d
+
+ # Prefix each line of exception output with this format. (string value)
+ #logging_exception_prefix = %(asctime)s.%(msecs)03d %(process)d ERROR %(name)s %(instance)s
+
+ # Defines the format string for %(user_identity)s that is used in
+ # logging_context_format_string. (string value)
+ #logging_user_identity_format = %(user)s %(tenant)s %(domain)s %(user_domain)s %(project_domain)s
+
+ # List of package logging levels in logger=LEVEL pairs. This option is ignored
+ # if log_config_append is set. (list value)
+ #default_log_levels = amqp=WARN,amqplib=WARN,boto=WARN,qpid=WARN,sqlalchemy=WARN,suds=INFO,oslo.messaging=INFO,iso8601=WARN,requests.packages.urllib3.connectionpool=WARN,urllib3.connectionpool=WARN,websocket=WARN,requests.packages.urllib3.util.retry=WARN,urllib3.util.retry=WARN,keystonemiddleware=WARN,routes.middleware=WARN,stevedore=WARN,taskflow=WARN,keystoneauth=WARN,oslo.cache=INFO,dogpile.core.dogpile=INFO
+
+ # Enables or disables publication of error events. (boolean value)
+ #publish_errors = false
+
+ # The format for an instance that is passed with the log message. (string
+ # value)
+ #instance_format = "[instance: %(uuid)s] "
+
+ # The format for an instance UUID that is passed with the log message. (string
+ # value)
+ #instance_uuid_format = "[instance: %(uuid)s] "
+
+ # Enables or disables fatal status of deprecations. (boolean value)
+ #fatal_deprecations = false
+
+
+ [binding]
+
+ driver = kuryr.lib.binding.drivers.vlan
+ link_iface = {{ kuryr_cni_link_interface }}
+
+ [kubernetes]
+
+ #
+ # From kuryr_kubernetes
+ #
+
+ # The root URL of the Kubernetes API (string value)
+ api_root = {{ openshift.master.api_url }}
+
+ # The token to talk to the k8s API
+ token_file = /etc/kuryr/token
+
+ # Absolute path to ca cert file to connect to HTTPS K8S_API (string value)
+ ssl_ca_crt_file = /etc/kuryr/ca.crt
+
+ # HTTPS K8S_API server identity verification (boolean value)
+ # TODO (apuimedo): Make configurable
+ ssl_verify_server_crt = True
diff --git a/roles/kuryr/templates/controller-deployment.yaml.j2 b/roles/kuryr/templates/controller-deployment.yaml.j2
new file mode 100644
index 000000000..d970270b5
--- /dev/null
+++ b/roles/kuryr/templates/controller-deployment.yaml.j2
@@ -0,0 +1,40 @@
+# More info about the template: https://docs.openstack.org/kuryr-kubernetes/latest/installation/containerized.html#generating-kuryr-resource-definitions-for-kubernetes
+
+apiVersion: apps/v1beta1
+kind: Deployment
+metadata:
+ labels:
+ name: kuryr-controller
+ name: kuryr-controller
+ namespace: {{ kuryr_namespace }}
+spec:
+ replicas: 1
+ template:
+ metadata:
+ labels:
+ name: kuryr-controller
+ name: kuryr-controller
+ spec:
+ serviceAccountName: kuryr-controller
+ automountServiceAccountToken: true
+ hostNetwork: true
+ containers:
+ - image: kuryr/controller:latest
+ imagePullPolicy: IfNotPresent
+ name: controller
+ terminationMessagePath: "/dev/termination-log"
+ # FIXME(dulek): This shouldn't be required, but without it selinux is
+ # complaining about access to kuryr.conf.
+ securityContext:
+ privileged: true
+ runAsUser: 0
+ volumeMounts:
+ - name: config-volume
+ mountPath: "/etc/kuryr/kuryr.conf"
+ subPath: kuryr.conf
+ volumes:
+ - name: config-volume
+ configMap:
+ name: kuryr-config
+ defaultMode: 0666
+ restartPolicy: Always
diff --git a/roles/lib_openshift/library/oc_adm_csr.py b/roles/lib_openshift/library/oc_adm_csr.py
index d1dc4caf8..324f52689 100644
--- a/roles/lib_openshift/library/oc_adm_csr.py
+++ b/roles/lib_openshift/library/oc_adm_csr.py
@@ -1478,11 +1478,23 @@ class OCcsr(OpenShiftCLI):
return False
+ def get_csr_request(self, request):
+ '''base64 decode the request object and call openssl to determine the
+ subject and specifically the CN: from the request
+
+ Output:
+ (0, '...
+ Subject: O=system:nodes, CN=system:node:ip-172-31-54-54.ec2.internal
+ ...')
+ '''
+ import base64
+ return self._run(['openssl', 'req', '-noout', '-text'], base64.b64decode(request))[1]
+
def match_node(self, csr):
'''match an inc csr to a node in self.nodes'''
for node in self.nodes:
- # we have a match
- if node['name'] in csr['metadata']['name']:
+ # we need to match based upon the csr's request certificate's CN
+ if node['name'] in self.get_csr_request(csr['spec']['request']):
node['csrs'][csr['metadata']['name']] = csr
# check that the username is the node and type is 'Approved'
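
The matching now keys off the CN embedded in the CSR's PEM-encoded request rather than the CSR object name. A hedged illustration of the same decode done from a playbook, assuming a pending CSR name in the hypothetical `csr_name` variable:

```
- name: Show the Subject CN encoded in a CSR's spec.request
  shell: >
    oc get csr {{ csr_name }} -o jsonpath='{.spec.request}'
    | base64 -d
    | openssl req -noout -subject
  changed_when: false
```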
diff --git a/roles/lib_openshift/library/oc_secret.py b/roles/lib_openshift/library/oc_secret.py
index 0614f359d..62bda33ad 100644
--- a/roles/lib_openshift/library/oc_secret.py
+++ b/roles/lib_openshift/library/oc_secret.py
@@ -1633,7 +1633,7 @@ class OCSecret(OpenShiftCLI):
This receives a list of file names and converts it into a secret.
The secret is then written to disk and passed into the `oc replace` command.
'''
- secret = self.prep_secret(files, force)
+ secret = self.prep_secret(files, force=force)
if secret['returncode'] != 0:
return secret
diff --git a/roles/lib_openshift/src/class/oc_adm_csr.py b/roles/lib_openshift/src/class/oc_adm_csr.py
index ea11c6ca9..22b8f9165 100644
--- a/roles/lib_openshift/src/class/oc_adm_csr.py
+++ b/roles/lib_openshift/src/class/oc_adm_csr.py
@@ -66,11 +66,23 @@ class OCcsr(OpenShiftCLI):
return False
+ def get_csr_request(self, request):
+ '''base64 decode the request object and call openssl to determine the
+ subject and specifically the CN: from the request
+
+ Output:
+ (0, '...
+ Subject: O=system:nodes, CN=system:node:ip-172-31-54-54.ec2.internal
+ ...')
+ '''
+ import base64
+ return self._run(['openssl', 'req', '-noout', '-text'], base64.b64decode(request))[1]
+
def match_node(self, csr):
'''match an inc csr to a node in self.nodes'''
for node in self.nodes:
- # we have a match
- if node['name'] in csr['metadata']['name']:
+ # we need to match based upon the csr's request certificate's CN
+ if node['name'] in self.get_csr_request(csr['spec']['request']):
node['csrs'][csr['metadata']['name']] = csr
# check that the username is the node and type is 'Approved'
diff --git a/roles/lib_openshift/src/class/oc_secret.py b/roles/lib_openshift/src/class/oc_secret.py
index 5322d6241..89e70b6b2 100644
--- a/roles/lib_openshift/src/class/oc_secret.py
+++ b/roles/lib_openshift/src/class/oc_secret.py
@@ -67,7 +67,7 @@ class OCSecret(OpenShiftCLI):
This receives a list of file names and converts it into a secret.
The secret is then written to disk and passed into the `oc replace` command.
'''
- secret = self.prep_secret(files, force)
+ secret = self.prep_secret(files, force=force)
if secret['returncode'] != 0:
return secret
diff --git a/roles/openshift_atomic/README.md b/roles/openshift_atomic/README.md
new file mode 100644
index 000000000..8c10c9991
--- /dev/null
+++ b/roles/openshift_atomic/README.md
@@ -0,0 +1,28 @@
+OpenShift Atomic
+================
+
+This role houses Atomic-specific tasks.
+
+Requirements
+------------
+
+Role Variables
+--------------
+
+Dependencies
+------------
+
+Example Playbook
+----------------
+
+```
+- name: Ensure atomic proxies are defined
+ hosts: localhost
+ roles:
+ - role: openshift_atomic
+```
+
+License
+-------
+
+Apache License Version 2.0
diff --git a/roles/openshift_atomic/meta/main.yml b/roles/openshift_atomic/meta/main.yml
new file mode 100644
index 000000000..ea129f514
--- /dev/null
+++ b/roles/openshift_atomic/meta/main.yml
@@ -0,0 +1,13 @@
+---
+galaxy_info:
+ author: OpenShift
+ description: Atomic related tasks
+ company: Red Hat, Inc
+ license: ASL 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+dependencies:
+- role: lib_openshift
diff --git a/roles/openshift_atomic/tasks/proxy.yml b/roles/openshift_atomic/tasks/proxy.yml
new file mode 100644
index 000000000..dde099984
--- /dev/null
+++ b/roles/openshift_atomic/tasks/proxy.yml
@@ -0,0 +1,32 @@
+---
+# Set http_proxy, https_proxy, and no_proxy in /etc/atomic.conf
+# regexp: the line starts with or without #, followed by the string
+# http_proxy, then either : or =
+- block:
+
+ - name: Add http_proxy to /etc/atomic.conf
+ lineinfile:
+ dest: /etc/atomic.conf
+ regexp: "^#?http_proxy[:=]{1}"
+ line: "http_proxy: {{ openshift.common.http_proxy | default('') }}"
+ when:
+ - openshift.common.http_proxy is defined
+ - openshift.common.http_proxy != ''
+
+ - name: Add https_proxy to /etc/atomic.conf
+ lineinfile:
+ dest: /etc/atomic.conf
+ regexp: "^#?https_proxy[:=]{1}"
+ line: "https_proxy: {{ openshift.common.https_proxy | default('') }}"
+ when:
+ - openshift.common.https_proxy is defined
+ - openshift.common.https_proxy != ''
+
+ - name: Add no_proxy to /etc/atomic.conf
+ lineinfile:
+ dest: /etc/atomic.conf
+ regexp: "^#?no_proxy[:=]{1}"
+ line: "no_proxy: {{ openshift.common.no_proxy | default('') }}"
+ when:
+ - openshift.common.no_proxy is defined
+ - openshift.common.no_proxy != ''
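
Assuming the cluster facts define all three proxies, /etc/atomic.conf would end up with lines like these (values invented):

```
http_proxy: http://proxy.example.com:3128
https_proxy: http://proxy.example.com:3128
no_proxy: .cluster.local,.svc,localhost,169.254.169.254
```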
diff --git a/roles/openshift_aws/README.md b/roles/openshift_aws/README.md
index 696efbea5..4aca5c7a8 100644
--- a/roles/openshift_aws/README.md
+++ b/roles/openshift_aws/README.md
@@ -1,7 +1,29 @@
openshift_aws
==================================
-Provision AWS infrastructure helpers.
+Provision AWS infrastructure and instances.
+
+This role contains a number of task areas that provision resources and perform
+actions against an AWS account in order to dynamically build an OpenShift
+cluster.
+
+This role is primarily intended to be used with `include_role` and `tasks_from`.
+
+`include_role` can be called from the tasks section of a play. See the example
+playbook below for reference.
+
+These task areas are:
+
+* provision a vpc: vpc.yml
+* provision elastic load balancers: elb.yml
+* upload IAM ssl certificates to use with load balancers: iam_cert.yml
+* provision an S3 bucket: s3.yml
+* provision an instance to build an AMI: provision_instance.yml
+* provision a security group in AWS: security_group.yml
+* provision ssh keys and users in AWS: ssh_keys.yml
+* provision an AMI in AWS: seal_ami.yml
+* provision scale groups: scale_group.yml
+* provision launch configs: launch_config.yml
Requirements
------------
@@ -9,57 +31,9 @@ Requirements
* Ansible 2.3
* Boto
-Role Variables
---------------
-
-From this role:
-
-| Name | Default value
-|---------------------------------------------------|-----------------------
-| openshift_aws_clusterid | default
-| openshift_aws_elb_scheme | internet-facing
-| openshift_aws_launch_config_bootstrap_token | ''
-| openshift_aws_node_group_config | {'master': {'ami': '{{ openshift_aws_ami }}', 'health_check': {'type': 'EC2', 'period': 60}, 'volumes': '{{ openshift_aws_node_group_config_master_volumes }}', 'tags': {'host-type': 'master', 'sub-host-type': 'default'}, 'min_size': 3, 'instance_type': 'm4.xlarge', 'desired_size': 3, 'wait_for_instances': True, 'max_size': 3}, 'tags': '{{ openshift_aws_node_group_config_tags }}', 'compute': {'ami': '{{ openshift_aws_ami }}', 'health_check': {'type': 'EC2', 'period': 60}, 'volumes': '{{ openshift_aws_node_group_config_node_volumes }}', 'tags': {'host-type': 'node', 'sub-host-type': 'compute'}, 'min_size': 3, 'instance_type': 'm4.xlarge', 'desired_size': 3, 'max_size': 100}, 'infra': {'ami': '{{ openshift_aws_ami }}', 'health_check': {'type': 'EC2', 'period': 60}, 'volumes': '{{ openshift_aws_node_group_config_node_volumes }}', 'tags': {'host-type': 'node', 'sub-host-type': 'infra'}, 'min_size': 2, 'instance_type': 'm4.xlarge', 'desired_size': 2, 'max_size': 20}}
-| openshift_aws_ami_copy_wait | False
-| openshift_aws_users | []
-| openshift_aws_launch_config_name | {{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}
-| openshift_aws_create_vpc | False
-| openshift_aws_node_group_type | master
-| openshift_aws_elb_cert_arn | ''
-| openshift_aws_kubernetes_cluster_status | owned
-| openshift_aws_s3_mode | create
-| openshift_aws_vpc | {'subnets': {'us-east-1': [{'cidr': '172.31.48.0/20', 'az': 'us-east-1c'}, {'cidr': '172.31.32.0/20', 'az': 'us-east-1e'}, {'cidr': '172.31.16.0/20', 'az': 'us-east-1a'}]}, 'cidr': '172.31.0.0/16', 'name': '{{ openshift_aws_vpc_name }}'}
-| openshift_aws_create_ssh_keys | False
-| openshift_aws_iam_kms_alias | alias/{{ openshift_aws_clusterid }}_kms
-| openshift_aws_use_custom_ami | False
-| openshift_aws_ami_copy_src_region | {{ openshift_aws_region }}
-| openshift_aws_s3_bucket_name | {{ openshift_aws_clusterid }}
-| openshift_aws_elb_health_check | {'response_timeout': 5, 'ping_port': 443, 'ping_protocol': 'tcp', 'interval': 30, 'healthy_threshold': 2, 'unhealthy_threshold': 2}
-| openshift_aws_node_security_groups | {'default': {'rules': [{'to_port': 22, 'from_port': 22, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 'all', 'from_port': 'all', 'proto': 'all', 'group_name': '{{ openshift_aws_clusterid }}'}], 'name': '{{ openshift_aws_clusterid }}', 'desc': '{{ openshift_aws_clusterid }} default'}, 'master': {'rules': [{'to_port': 80, 'from_port': 80, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 443, 'from_port': 443, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}], 'name': '{{ openshift_aws_clusterid }}_master', 'desc': '{{ openshift_aws_clusterid }} master instances'}, 'compute': {'name': '{{ openshift_aws_clusterid }}_compute', 'desc': '{{ openshift_aws_clusterid }} compute node instances'}, 'etcd': {'name': '{{ openshift_aws_clusterid }}_etcd', 'desc': '{{ openshift_aws_clusterid }} etcd instances'}, 'infra': {'rules': [{'to_port': 80, 'from_port': 80, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 443, 'from_port': 443, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}, {'to_port': 32000, 'from_port': 30000, 'cidr_ip': '0.0.0.0/0', 'proto': 'tcp'}], 'name': '{{ openshift_aws_clusterid }}_infra', 'desc': '{{ openshift_aws_clusterid }} infra node instances'}}
-| openshift_aws_elb_security_groups | ['{{ openshift_aws_clusterid }}', '{{ openshift_aws_clusterid }}_{{ openshift_aws_node_group_type }}']
-| openshift_aws_vpc_tags | {'Name': '{{ openshift_aws_vpc_name }}'}
-| openshift_aws_create_security_groups | False
-| openshift_aws_create_iam_cert | False
-| openshift_aws_create_scale_group | True
-| openshift_aws_ami_encrypt | False
-| openshift_aws_node_group_config_node_volumes | [{'volume_size': 100, 'delete_on_termination': True, 'device_type': 'gp2', 'device_name': '/dev/sdb'}]
-| openshift_aws_elb_instance_filter | {'tag:host-type': '{{ openshift_aws_node_group_type }}', 'tag:clusterid': '{{ openshift_aws_clusterid }}', 'instance-state-name': 'running'}
-| openshift_aws_region | us-east-1
-| openshift_aws_elb_name | {{ openshift_aws_clusterid }}-{{ openshift_aws_node_group_type }}
-| openshift_aws_elb_idle_timout | 400
-| openshift_aws_subnet_name | us-east-1c
-| openshift_aws_node_group_config_tags | {{ openshift_aws_clusterid | openshift_aws_build_instance_tags(openshift_aws_kubernetes_cluster_status) }}
-| openshift_aws_create_launch_config | True
-| openshift_aws_ami_tags | {'bootstrap': 'true', 'clusterid': '{{ openshift_aws_clusterid }}', 'openshift-created': 'true'}
-| openshift_aws_ami_name | openshift-gi
-| openshift_aws_node_group_config_master_volumes | [{'volume_size': 100, 'delete_on_termination': False, 'device_type': 'gp2', 'device_name': '/dev/sdb'}]
-| openshift_aws_vpc_name | {{ openshift_aws_clusterid }}
-| openshift_aws_elb_listeners | {'master': {'internal': [{'instance_port': 80, 'instance_protocol': 'tcp', 'load_balancer_port': 80, 'protocol': 'tcp'}, {'instance_port': 443, 'instance_protocol': 'tcp', 'load_balancer_port': 443, 'protocol': 'tcp'}], 'external': [{'instance_port': 443, 'instance_protocol': 'ssl', 'load_balancer_port': 80, 'protocol': 'tcp'}, {'instance_port': 443, 'instance_protocol': 'ssl', 'load_balancer_port': 443, 'ssl_certificate_id': '{{ openshift_aws_elb_cert_arn }}', 'protocol': 'ssl'}]}}
-|
-
-
-Dependencies
-------------
+Appropriate AWS credentials and permissions are required.
+
+
Example Playbook
@@ -72,7 +46,6 @@ Example Playbook
vars:
openshift_aws_clusterid: test
openshift_aws_region: us-east-1
- openshift_aws_create_vpc: true
```
License
diff --git a/roles/openshift_aws/defaults/main.yml b/roles/openshift_aws/defaults/main.yml
index 94c0f4472..5371588cf 100644
--- a/roles/openshift_aws/defaults/main.yml
+++ b/roles/openshift_aws/defaults/main.yml
@@ -1,11 +1,9 @@
---
-openshift_aws_create_vpc: True
openshift_aws_create_s3: True
openshift_aws_create_iam_cert: True
openshift_aws_create_security_groups: True
openshift_aws_create_launch_config: True
openshift_aws_create_scale_group: True
-openshift_aws_kubernetes_cluster_status: owned # or shared
openshift_aws_node_group_type: master
openshift_aws_wait_for_ssh: True
@@ -14,6 +12,7 @@ openshift_aws_clusterid: default
openshift_aws_region: us-east-1
openshift_aws_vpc_name: "{{ openshift_aws_clusterid }}"
openshift_aws_build_ami_group: "{{ openshift_aws_clusterid }}"
+openshift_aws_kubernetes_cluster_status: "{{ openshift_aws_clusterid }}"
openshift_aws_iam_cert_name: "{{ openshift_aws_clusterid }}-master-external"
openshift_aws_iam_cert_path: ''
@@ -90,6 +89,10 @@ openshift_aws_node_group_config_node_volumes:
delete_on_termination: True
openshift_aws_node_group_config_tags: "{{ openshift_aws_clusterid | build_instance_tags(openshift_aws_kubernetes_cluster_status) }}"
+openshift_aws_node_group_termination_policy: Default
+openshift_aws_node_group_replace_instances: []
+openshift_aws_node_group_replace_all_instances: False
+openshift_aws_node_group_config_extra_labels: {}
openshift_aws_node_group_config:
tags: "{{ openshift_aws_node_group_config_tags }}"
@@ -106,7 +109,11 @@ openshift_aws_node_group_config:
tags:
host-type: master
sub-host-type: default
+ labels:
+ type: master
wait_for_instances: True
+ termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
+ replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
compute:
instance_type: m4.xlarge
ami: "{{ openshift_aws_ami }}"
@@ -120,6 +127,10 @@ openshift_aws_node_group_config:
tags:
host-type: node
sub-host-type: compute
+ labels:
+ type: compute
+ termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
+ replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
infra:
instance_type: m4.xlarge
ami: "{{ openshift_aws_ami }}"
@@ -133,6 +144,10 @@ openshift_aws_node_group_config:
tags:
host-type: node
sub-host-type: infra
+ labels:
+ type: infra
+ termination_policy: "{{ openshift_aws_node_group_termination_policy }}"
+ replace_all_instances: "{{ openshift_aws_node_group_replace_all_instances }}"
openshift_aws_elb_security_groups:
- "{{ openshift_aws_clusterid }}"
@@ -212,3 +227,7 @@ openshift_aws_vpc:
az: "us-east-1e"
- cidr: 172.31.16.0/20
az: "us-east-1a"
+
+openshift_aws_node_run_bootstrap_startup: True
+openshift_aws_node_user_data: ''
+openshift_aws_node_config_namespace: openshift-node
diff --git a/roles/openshift_aws/tasks/launch_config.yml b/roles/openshift_aws/tasks/launch_config.yml
index e6be9969c..8b7b02a0e 100644
--- a/roles/openshift_aws/tasks/launch_config.yml
+++ b/roles/openshift_aws/tasks/launch_config.yml
@@ -4,6 +4,11 @@
when:
- openshift_aws_ami is undefined
+- fail:
+ msg: "Ensure that openshift_deployment_type is defined."
+ when:
+ - openshift_deployment_type is undefined
+
- name: query vpc
ec2_vpc_net_facts:
region: "{{ openshift_aws_region }}"
@@ -27,23 +32,7 @@
image_id: "{{ openshift_aws_ami }}"
instance_type: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].instance_type }}"
security_groups: "{{ openshift_aws_launch_config_security_group_id | default(ec2sgs.security_groups | map(attribute='group_id')| list) }}"
- user_data: |-
- #cloud-config
- {% if openshift_aws_node_group_type != 'master' %}
- write_files:
- - path: /root/csr_kubeconfig
- owner: root:root
- permissions: '0640'
- content: {{ openshift_aws_launch_config_bootstrap_token | default('') | to_yaml }}
- - path: /root/openshift_settings
- owner: root:root
- permissions: '0640'
- content:
- openshift_type: "{{ openshift_aws_node_group_type }}"
- runcmd:
- - [ systemctl, enable, atomic-openshift-node]
- - [ systemctl, start, atomic-openshift-node]
- {% endif %}
+ user_data: "{{ lookup('template', 'user_data.j2') }}"
key_name: "{{ openshift_aws_ssh_key_name }}"
ebs_optimized: False
volumes: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].volumes }}"
diff --git a/roles/openshift_aws/tasks/master_facts.yml b/roles/openshift_aws/tasks/master_facts.yml
new file mode 100644
index 000000000..737cfc7a6
--- /dev/null
+++ b/roles/openshift_aws/tasks/master_facts.yml
@@ -0,0 +1,22 @@
+---
+- name: fetch elbs
+ ec2_elb_facts:
+ region: "{{ openshift_aws_region }}"
+ names:
+ - "{{ item }}"
+ with_items:
+ - "{{ openshift_aws_elb_name }}-external"
+ - "{{ openshift_aws_elb_name }}-internal"
+ delegate_to: localhost
+ register: elbs
+
+- debug: var=elbs
+
+- name: set fact
+ set_fact:
+ openshift_master_cluster_hostname: "{{ elbs.results[1].elbs[0].dns_name }}"
+ osm_custom_cors_origins:
+ - "{{ elbs.results[1].elbs[0].dns_name }}"
+ - "console.{{ openshift_aws_clusterid | default('default') }}.openshift.com"
+ - "api.{{ openshift_aws_clusterid | default('default') }}.openshift.com"
+ with_items: "{{ groups['masters'] }}"
diff --git a/roles/openshift_aws/tasks/provision.yml b/roles/openshift_aws/tasks/provision.yml
index a2920b744..a8518d43a 100644
--- a/roles/openshift_aws/tasks/provision.yml
+++ b/roles/openshift_aws/tasks/provision.yml
@@ -1,16 +1,8 @@
---
-- when: openshift_aws_create_vpc | bool
- name: create default vpc
- include: vpc.yml
-
- when: openshift_aws_create_iam_cert | bool
name: create the iam_cert for elb certificate
include: iam_cert.yml
-- when: openshift_aws_users | length > 0
- name: create aws ssh keypair
- include: ssh_keys.yml
-
- when: openshift_aws_create_s3 | bool
name: create s3 bucket for registry
include: s3.yml
diff --git a/roles/openshift_aws/tasks/build_ami.yml b/roles/openshift_aws/tasks/provision_instance.yml
index 48555e5da..25ae6ce1c 100644
--- a/roles/openshift_aws/tasks/build_ami.yml
+++ b/roles/openshift_aws/tasks/provision_instance.yml
@@ -1,15 +1,7 @@
---
-- when: openshift_aws_create_vpc | bool
- name: create a vpc
- include: vpc.yml
-
-- when: openshift_aws_users | length > 0
- name: create aws ssh keypair
- include: ssh_keys.yml
-
-- when: openshift_aws_create_security_groups | bool
- name: Create compute security_groups
- include: security_group.yml
+- name: set openshift_node_bootstrap to True when building AMI
+ set_fact:
+ openshift_node_bootstrap: True
- name: query vpc
ec2_vpc_net_facts:
@@ -33,7 +25,7 @@
key_name: "{{ openshift_aws_ssh_key_name }}"
group: "{{ openshift_aws_build_ami_group }}"
instance_type: m4.xlarge
- vpc_subnet_id: "{{ subnetout.subnets[0].id }}"
+ vpc_subnet_id: "{{ openshift_aws_subnet_id | default(subnetout.subnets[0].id) }}"
image: "{{ openshift_aws_base_ami }}"
volumes:
- device_name: /dev/sdb
@@ -46,3 +38,26 @@
Name: "{{ openshift_aws_base_ami_name }}"
instance_tags:
Name: "{{ openshift_aws_base_ami_name }}"
+
+- name: fetch newly created instances
+ ec2_remote_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ "tag:Name": "{{ openshift_aws_base_ami_name }}"
+ instance-state-name: running
+ register: instancesout
+ retries: 20
+ delay: 3
+ until: instancesout.instances|length > 0
+
+- name: wait for ssh to become available
+ wait_for:
+ port: 22
+ host: "{{ instancesout.instances[0].public_ip_address }}"
+ timeout: 300
+ search_regex: OpenSSH
+
+- name: add host to nodes
+ add_host:
+ groups: nodes
+ name: "{{ instancesout.instances[0].public_dns_name }}"
diff --git a/roles/openshift_aws/tasks/scale_group.yml b/roles/openshift_aws/tasks/scale_group.yml
index 3e969fc43..eb31636e7 100644
--- a/roles/openshift_aws/tasks/scale_group.yml
+++ b/roles/openshift_aws/tasks/scale_group.yml
@@ -28,5 +28,7 @@
load_balancers: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].elbs if 'elbs' in openshift_aws_node_group_config[openshift_aws_node_group_type] else omit }}"
wait_for_instances: "{{ openshift_aws_node_group_config[openshift_aws_node_group_type].wait_for_instances | default(False)}}"
vpc_zone_identifier: "{{ subnetout.subnets[0].id }}"
+ replace_instances: "{{ openshift_aws_node_group_replace_instances if openshift_aws_node_group_replace_instances != [] else omit }}"
+ replace_all_instances: "{{ omit if openshift_aws_node_group_replace_instances != [] else (openshift_aws_node_group_config[openshift_aws_node_group_type].replace_all_instances | default(omit)) }}"
tags:
- "{{ openshift_aws_node_group_config.tags | combine(openshift_aws_node_group_config[openshift_aws_node_group_type].tags) }}"
diff --git a/roles/openshift_aws/tasks/setup_master_group.yml b/roles/openshift_aws/tasks/setup_master_group.yml
new file mode 100644
index 000000000..166f3b938
--- /dev/null
+++ b/roles/openshift_aws/tasks/setup_master_group.yml
@@ -0,0 +1,35 @@
+---
+- name: Alert user to variables needed - clusterid
+ debug:
+ msg: "openshift_aws_clusterid={{ openshift_aws_clusterid }}"
+
+- name: Alert user to variables needed - region
+ debug:
+ msg: "openshift_aws_region={{ openshift_aws_region }}"
+
+- name: fetch newly created instances
+ ec2_remote_facts:
+ region: "{{ openshift_aws_region }}"
+ filters:
+ "tag:clusterid": "{{ openshift_aws_clusterid }}"
+ "tag:host-type": master
+ instance-state-name: running
+ register: instancesout
+ retries: 20
+ delay: 3
+ until: instancesout.instances|length > 0
+
+- name: add new master to masters group
+ add_host:
+ groups: "masters,etcd,nodes"
+ name: "{{ item.public_dns_name }}"
+ hostname: "{{ openshift_aws_clusterid }}-master-{{ item.id[:-5] }}"
+ with_items: "{{ instancesout.instances }}"
+
+- name: wait for ssh to become available
+ wait_for:
+ port: 22
+ host: "{{ item.public_dns_name }}"
+ timeout: 300
+ search_regex: OpenSSH
+ with_items: "{{ instancesout.instances }}"
diff --git a/roles/openshift_aws/templates/user_data.j2 b/roles/openshift_aws/templates/user_data.j2
new file mode 100644
index 000000000..76aebdcea
--- /dev/null
+++ b/roles/openshift_aws/templates/user_data.j2
@@ -0,0 +1,26 @@
+{% if openshift_aws_node_user_data is defined and openshift_aws_node_user_data != '' %}
+{{ openshift_aws_node_user_data }}
+{% else %}
+#cloud-config
+write_files:
+- path: /root/openshift_bootstrap/openshift_settings.yaml
+ owner: 'root:root'
+ permissions: '0640'
+ content: |
+ openshift_group_type: {{ openshift_aws_node_group_type }}
+{% if openshift_aws_node_group_type != 'master' %}
+- path: /etc/origin/node/bootstrap.kubeconfig
+ owner: 'root:root'
+ permissions: '0640'
+ encoding: b64
+ content: {{ openshift_aws_launch_config_bootstrap_token | b64encode }}
+{% endif %}
+runcmd:
+{% if openshift_aws_node_run_bootstrap_startup %}
+- [ ansible-playbook, /root/openshift_bootstrap/bootstrap.yml]
+{% endif %}
+{% if openshift_aws_node_group_type != 'master' %}
+- [ systemctl, enable, {% if openshift_deployment_type == 'openshift-enterprise' %}atomic-openshift{% else %}origin{% endif %}-node]
+- [ systemctl, start, {% if openshift_deployment_type == 'openshift-enterprise' %}atomic-openshift{% else %}origin{% endif %}-node]
+{% endif %}
+{% endif %}
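
For reference, a hedged render of the template above for a compute node on an origin deployment (the base64 token content is fake):

```
#cloud-config
write_files:
- path: /root/openshift_bootstrap/openshift_settings.yaml
  owner: 'root:root'
  permissions: '0640'
  content: |
    openshift_group_type: compute
- path: /etc/origin/node/bootstrap.kubeconfig
  owner: 'root:root'
  permissions: '0640'
  encoding: b64
  content: RkFLRS1UT0tFTg==
runcmd:
- [ ansible-playbook, /root/openshift_bootstrap/bootstrap.yml]
- [ systemctl, enable, origin-node]
- [ systemctl, start, origin-node]
```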
diff --git a/roles/openshift_ca/tasks/main.yml b/roles/openshift_ca/tasks/main.yml
index 419679bc2..fad1ff5de 100644
--- a/roles/openshift_ca/tasks/main.yml
+++ b/roles/openshift_ca/tasks/main.yml
@@ -18,9 +18,7 @@
- name: Reload generated facts
openshift_facts:
- when: install_result | changed
- delegate_to: "{{ openshift_ca_host }}"
- run_once: true
+ when: hostvars[openshift_ca_host].install_result | changed
- name: Create openshift_ca_config_dir if it does not exist
file:
diff --git a/roles/openshift_cfme/README.md b/roles/openshift_cfme/README.md
deleted file mode 100644
index 8283afed6..000000000
--- a/roles/openshift_cfme/README.md
+++ /dev/null
@@ -1,404 +0,0 @@
-# OpenShift-Ansible - CFME Role
-
-# PROOF OF CONCEPT - Alpha Version
-
-This role is based on the work in the upstream
-[manageiq/manageiq-pods](https://github.com/ManageIQ/manageiq-pods)
-project. For additional literature on configuration specific to
-ManageIQ (optional post-installation tasks), visit the project's
-[upstream documentation page](http://manageiq.org/docs/get-started/basic-configuration).
-
-Please submit a
-[new issue](https://github.com/openshift/openshift-ansible/issues/new)
-if you run into bugs with this role or wish to request enhancements.
-
-# Important Notes
-
-This is an early *proof of concept* role to install the Cloud Forms
-Management Engine (ManageIQ) on OpenShift Container Platform (OCP).
-
-* This role is still in **ALPHA STATUS**
-* Many options are hard-coded still (ex: NFS setup)
-* Not many configurable options yet
-* **Should** be run on a dedicated cluster
-* **Will not run** on undersized infra
-* The terms *CFME* and *MIQ* / *ManageIQ* are interchangeable
-
-## Requirements
-
-**NOTE:** These requirements are copied from the upstream
-[manageiq/manageiq-pods](https://github.com/ManageIQ/manageiq-pods)
-project.
-
-### Prerequisites:
-
-* [OpenShift Origin 1.5](https://docs.openshift.com/container-platform/3.5/welcome/index.html) or [higher](https://docs.openshift.com/container-platform/latest/welcome/index.html) provisioned
-* NFS or other compatible volume provider
-* A cluster-admin user (created by role if required)
-
-### Cluster Sizing
-
-In order to avoid random deployment failures due to resource
-starvation, we recommend a minimum cluster size for a **test**
-environment.
-
-| Type | Size | CPUs | Memory |
-|----------------|---------|----------|----------|
-| Masters | `1+` | `8` | `12GB` |
-| Nodes | `2+` | `4` | `8GB` |
-| PV Storage | `25GB` | `N/A` | `N/A` |
-
-
-![Basic CFME Deployment](img/CFMEBasicDeployment.png)
-
-**CFME has hard requirements for memory. CFME will NOT install if your
- infrastructure does not meet or exceed the requirements given
- above. Do not run this playbook if you do not have the required
- memory; you will just waste your time.**
-
-
-### Other sizing considerations
-
-* Recommendations assume MIQ will be the **only application running**
- on this cluster.
-* Alternatively, you can provision an infrastructure node to run
- registry/metrics/router/logging pods.
-* Each MIQ application pod will consume at least `3GB` of RAM on initial
- deployment (blank deployment without providers).
-* RAM consumption will ramp up depending on appliance use; once
- providers are added, expect higher resource consumption.
-
-
-### Assumptions
-
-1) You meet/exceed the [cluster sizing](#cluster-sizing) requirements
-1) Your NFS server is on your master host
-1) Your PV backing NFS storage volume is mounted on `/exports/`
-
-Required directories that NFS will export to back the PVs:
-
-* `/exports/miq-pv0[123]`
-
-If the required directories are not present at install-time, they will
-be created using the recommended permissions per the
-[upstream documentation](https://github.com/ManageIQ/manageiq-pods#make-persistent-volumes-to-host-the-miq-database-and-application-data):
-
-* UID/GID: `root`/`root`
-* Mode: `0775`
-
-**IMPORTANT:** If you are using a separate volume (`/dev/vdX`) for NFS
- storage, **ensure** it is mounted on `/exports/` **before** running
- this role.
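-
-If you prefer to pre-create the export directories yourself, a minimal
-Ansible sketch mirroring the ownership and mode above is shown below
-(illustrative only; the role performs an equivalent step for you):
-
-```
-- file:
-    path: "/exports/{{ item }}"
-    state: directory
-    owner: root
-    group: root
-    mode: 0775
-  with_items:
-  - miq-pv01
-  - miq-pv02
-  - miq-pv03
-```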
-
-
-
-## Role Variables
-
-Core variables in this role:
-
-| Name | Default value | Description |
-|-------------------------------|---------------|---------------|
-| `openshift_cfme_install_app` | `False` | `True`: Install everything and create a new CFME app, `False`: Just install all of the templates and scaffolding |
-
-
-Variables you may override have defaults defined in
-[defaults/main.yml](defaults/main.yml).
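-
-For example, a hedged sketch of overriding a couple of these variables
-from a group_vars YAML file (the values are illustrative):
-
-```
-openshift_cfme_install_app: True
-openshift_cfme_project: cfme-test
-```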
-
-
-# Important Notes
-
-This is a **tech preview** status role presently. Use it with the same
-caution you would give any other pre-release software.
-
-**Most importantly** follow this one rule: don't re-run the entrypoint
-playbook multiple times in a row without cleaning up after previous
-runs if some of the CFME steps have run. This is a known
-flake. Cleanup instructions are provided at the bottom of this README.
-
-
-# Usage
-
-This section describes the basic usage of this role. All parameters
-will use their [default values](defaults/main.yml).
-
-## Pre-flight Checks
-
-**IMPORTANT:** As documented above in [the prerequisites](#prerequisites),
- you **must already** have your OCP cluster up and running.
-
-**Optional:** The ManageIQ pod is fairly large (about 1.7 GB) so to
-save some spin-up time post-deployment, you can begin pre-pulling the
-docker image to each of your nodes now:
-
-```
-root@node0x # docker pull docker.io/manageiq/manageiq-pods:app-latest-fine
-```
-
-## Getting Started
-
-1) The *entry point playbook* to install CFME is located in
-[the BYO playbooks](../../playbooks/byo/openshift-cfme/config.yml)
-directory.
-
-2) Update your existing `hosts` inventory file and ensure the
-parameter `openshift_cfme_install_app` is set to `True` under the
-`[OSEv3:vars]` block.
-
-3) Using your existing `hosts` inventory file, run `ansible-playbook`
-with the entry point playbook:
-
-```
-$ ansible-playbook -v -i <INVENTORY_FILE> playbooks/byo/openshift-cfme/config.yml
-```
-
-## Next Steps
-
-Once complete, the playbook will let you know:
-
-
-```
-TASK [openshift_cfme : Status update] *********************************************************
-ok: [ho.st.na.me] => {
- "msg": "CFME has been deployed. Note that there will be a delay before it is fully initialized.\n"
-}
-```
-
-This will take several minutes (*possibly 10 or more*, depending on
-your network connection). However, you can get some insight into the
-deployment process during initialization.
-
-### oc describe pod manageiq-0
-
-*Some useful information about the output you will see if you run the
-`oc describe pod manageiq-0` command*
-
-**Readiness probes** - These will take a while to become
-`Healthy`. The initial health probes won't even happen for at least 8
-minutes depending on how long it takes you to pull down the large
-images. ManageIQ is a large application so it may take a considerable
-amount of time for it to deploy and be marked as `Healthy`.
-
-If you go to the node you know the application is running on (check
-for `Successfully assigned manageiq-0 to <HOST|IP>` in the `describe`
-output), you can run a `docker pull` command to monitor the progress of
-the image pull:
-
-```
-[root@cfme-node ~]# docker pull docker.io/manageiq/manageiq-pods:app-latest-fine
-Trying to pull repository docker.io/manageiq/manageiq-pods ...
-sha256:6c055ca9d3c65cd694d6c0e28986b5239ba56bbdf0488cccdaa283d545258f8a: Pulling from docker.io/manageiq/manageiq-pods
-Digest: sha256:6c055ca9d3c65cd694d6c0e28986b5239ba56bbdf0488cccdaa283d545258f8a
-Status: Image is up to date for docker.io/manageiq/manageiq-pods:app-latest-fine
-```
-
-The example above demonstrates the case where the image has been
-successfully pulled already.
-
-If the image isn't completely pulled yet, you will see
-multiple progress bars detailing each image layer's download status.
-
-
-### rsh
-
-*Useful inspection/progress monitoring techniques with the `oc rsh`
-command.*
-
-
-On your master node, switch to the `cfme` project (or whatever you
-named it if you overrode the `openshift_cfme_project` variable) and
-check on the pod states:
-
-```
-[root@cfme-master01 ~]# oc project cfme
-Now using project "cfme" on server "https://10.10.0.100:8443".
-
-[root@cfme-master01 ~]# oc get pod
-NAME READY STATUS RESTARTS AGE
-manageiq-0 0/1 Running 0 14m
-memcached-1-3lk7g 1/1 Running 0 14m
-postgresql-1-12slb 1/1 Running 0 14m
-```
-
-Note how the `manageiq-0` pod says `0/1` under the **READY**
-column. After some time (depending on your network connection) you'll
-be able to `rsh` into the pod to find out more of what's happening in
-real time. First, the easy-mode command: run this once `rsh` is
-available and then watch until it says `Started Initialize Appliance
-Database`:
-
-```
-[root@cfme-master01 ~]# oc rsh manageiq-0 journalctl -f -u appliance-initialize.service
-```
-
-For the full explanation of what this means, and more interactive
-inspection techniques, keep reading.
-
-To obtain a shell on our `manageiq` pod we use this command:
-
-```
-[root@cfme-master01 ~]# oc rsh manageiq-0 bash -l
-```
-
-The `rsh` command opens a shell in your pod for you. In this case it's
-the pod called `manageiq-0`. `systemd` is managing the services in
-this pod so we can use the `list-units` command to see what is running
-currently: `# systemctl list-units | grep appliance`.
-
-If you see the `appliance-initialize` service running, this indicates
-that basic setup is still in progress. We can monitor the process with
-the `journalctl` command like so:
-
-
-```
-[root@manageiq-0 vmdb]# journalctl -f -u appliance-initialize.service
-Jun 14 14:55:52 manageiq-0 appliance-initialize.sh[58]: == Checking deployment status ==
-Jun 14 14:55:52 manageiq-0 appliance-initialize.sh[58]: No pre-existing EVM configuration found on region PV
-Jun 14 14:55:52 manageiq-0 appliance-initialize.sh[58]: == Checking for existing data on server PV ==
-Jun 14 14:55:52 manageiq-0 appliance-initialize.sh[58]: == Starting New Deployment ==
-Jun 14 14:55:52 manageiq-0 appliance-initialize.sh[58]: == Applying memcached config ==
-Jun 14 14:55:53 manageiq-0 appliance-initialize.sh[58]: == Initializing Appliance ==
-Jun 14 14:55:57 manageiq-0 appliance-initialize.sh[58]: create encryption key
-Jun 14 14:55:57 manageiq-0 appliance-initialize.sh[58]: configuring external database
-Jun 14 14:55:57 manageiq-0 appliance-initialize.sh[58]: Checking for connections to the database...
-Jun 14 14:56:09 manageiq-0 appliance-initialize.sh[58]: Create region starting
-Jun 14 14:58:15 manageiq-0 appliance-initialize.sh[58]: Create region complete
-Jun 14 14:58:15 manageiq-0 appliance-initialize.sh[58]: == Initializing PV data ==
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: == Initializing PV data backup ==
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: sending incremental file list
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: created directory /persistent/server-deploy/backup/backup_2017_06_14_145816
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/REGION
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/certs/
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/certs/v2_key
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/config/
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: region-data/var/www/miq/vmdb/config/database.yml
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/var/
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/var/www/
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/var/www/miq/
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/var/www/miq/vmdb/
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: server-data/var/www/miq/vmdb/GUID
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: sent 1330 bytes received 136 bytes 2932.00 bytes/sec
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: total size is 770 speedup is 0.53
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: == Restoring PV data symlinks ==
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: /var/www/miq/vmdb/REGION symlink is already in place, skipping
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: /var/www/miq/vmdb/config/database.yml symlink is already in place, skipping
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: /var/www/miq/vmdb/certs/v2_key symlink is already in place, skipping
-Jun 14 14:58:16 manageiq-0 appliance-initialize.sh[58]: /var/www/miq/vmdb/log symlink is already in place, skipping
-Jun 14 14:58:28 manageiq-0 systemctl[304]: Removed symlink /etc/systemd/system/multi-user.target.wants/appliance-initialize.service.
-Jun 14 14:58:29 manageiq-0 systemd[1]: Started Initialize Appliance Database.
-```
-
-Most of what we see here (above) is the initial database seeding
-process. This process isn't very quick, so be patient.
-
-At the bottom of the log there is a special line from the `systemctl`
-service, `Removed symlink
-/etc/systemd/system/multi-user.target.wants/appliance-initialize.service`. The
-`appliance-initialize` service is no longer marked as enabled. This
-indicates that the base application initialization is complete now.
-
-We're not done yet, though; there are other ancillary services which
-run in this pod to support the application. *Still in the rsh shell*,
-use the `ps` command to monitor for the `httpd` processes
-starting. You will see output similar to the following when that stage
-has completed:
-
-```
-[root@manageiq-0 vmdb]# ps aux | grep http
-root 1941 0.0 0.1 249820 7640 ? Ss 15:02 0:00 /usr/sbin/httpd -DFOREGROUND
-apache 1942 0.0 0.0 250752 6012 ? S 15:02 0:00 /usr/sbin/httpd -DFOREGROUND
-apache 1943 0.0 0.0 250472 5952 ? S 15:02 0:00 /usr/sbin/httpd -DFOREGROUND
-apache 1944 0.0 0.0 250472 5916 ? S 15:02 0:00 /usr/sbin/httpd -DFOREGROUND
-apache 1945 0.0 0.0 250360 5764 ? S 15:02 0:00 /usr/sbin/httpd -DFOREGROUND
-```
-
-Furthermore, you can find other related processes by just looking for
-ones with `MIQ` in their name:
-
-```
-[root@manageiq-0 vmdb]# ps aux | grep miq
-root 333 27.7 4.2 555884 315916 ? Sl 14:58 3:59 MIQ Server
-root 1976 0.6 4.0 507224 303740 ? SNl 15:02 0:03 MIQ: MiqGenericWorker id: 1, queue: generic
-root 1984 0.6 4.0 507224 304312 ? SNl 15:02 0:03 MIQ: MiqGenericWorker id: 2, queue: generic
-root 1992 0.9 4.0 508252 304888 ? SNl 15:02 0:05 MIQ: MiqPriorityWorker id: 3, queue: generic
-root 2000 0.7 4.0 510308 304696 ? SNl 15:02 0:04 MIQ: MiqPriorityWorker id: 4, queue: generic
-root 2008 1.2 4.0 514000 303612 ? SNl 15:02 0:07 MIQ: MiqScheduleWorker id: 5
-root 2026 0.2 4.0 517504 303644 ? SNl 15:02 0:01 MIQ: MiqEventHandler id: 6, queue: ems
-root 2036 0.2 4.0 518532 303768 ? SNl 15:02 0:01 MIQ: MiqReportingWorker id: 7, queue: reporting
-root 2044 0.2 4.0 519560 303812 ? SNl 15:02 0:01 MIQ: MiqReportingWorker id: 8, queue: reporting
-root 2059 0.2 4.0 528372 303956 ? SNl 15:02 0:01 puma 3.3.0 (tcp://127.0.0.1:5000) [MIQ: Web Server Worker]
-root 2067 0.9 4.0 529664 305716 ? SNl 15:02 0:05 puma 3.3.0 (tcp://127.0.0.1:3000) [MIQ: Web Server Worker]
-root 2075 0.2 4.0 529408 304056 ? SNl 15:02 0:01 puma 3.3.0 (tcp://127.0.0.1:4000) [MIQ: Web Server Worker]
-root 2329 0.0 0.0 10640 972 ? S+ 15:13 0:00 grep --color=auto -i miq
-```
-
-Finally, *still in the rsh shell*, to test if the application is
-running correctly, we can request the application homepage. If the
-page is available, the page title will be `ManageIQ: Login`:
-
-```
-[root@manageiq-0 vmdb]# curl -s -k https://localhost | grep -A2 '<title>'
-<title>
-ManageIQ: Login
-</title>
-```
-
-**Note:** The `-s` flag makes `curl` operations silent, and the `-k`
-flag ignores errors about untrusted certificates.
-
-
-
-# Additional Upstream Resources
-
-Below are some useful resources from the upstream project
-documentation. You may find these of value.
-
-* [Verify Setup Was Successful](https://github.com/ManageIQ/manageiq-pods#verifying-the-setup-was-successful)
-* [POD Access And Routes](https://github.com/ManageIQ/manageiq-pods#pod-access-and-routes)
-* [Troubleshooting](https://github.com/ManageIQ/manageiq-pods#troubleshooting)
-
-
-# Manual Cleanup
-
-At this time uninstallation/cleanup is still a manual process. You
-will have to follow a few steps to fully remove CFME from your
-cluster.
-
-Delete the project:
-
-* `oc delete project cfme`
-
-Delete the PVs:
-
-* `oc delete pv miq-pv01`
-* `oc delete pv miq-pv02`
-* `oc delete pv miq-pv03`
-
-Clean out the old PV data:
-
-* `cd /exports/`
-* `find miq* -type f -delete`
-* `find miq* -type d -delete`
-
-Remove the NFS exports:
-
-* `rm /etc/exports.d/openshift_cfme.exports`
-* `exportfs -ar`
-
-Delete the user:
-
-* `oc delete user cfme`
-
-**NOTE:** The `oc delete project cfme` command will return quickly;
-however, it will continue to operate in the background. Continue
-running `oc get project` after you've completed the other steps to
-monitor the pods and final project termination progress.
diff --git a/roles/openshift_cfme/defaults/main.yml b/roles/openshift_cfme/defaults/main.yml
deleted file mode 100644
index b82c2e602..000000000
--- a/roles/openshift_cfme/defaults/main.yml
+++ /dev/null
@@ -1,42 +0,0 @@
----
-# Namespace for the CFME project (Note: changed post-3.6 to use
-# reserved 'openshift-' namespace prefix)
-openshift_cfme_project: openshift-cfme
-# Namespace/project description
-openshift_cfme_project_description: ManageIQ - CloudForms Management Engine
-# Basic user assigned the `admin` role for the project
-openshift_cfme_user: cfme
-# Project system account for enabling privileged pods
-openshift_cfme_service_account: "system:serviceaccount:{{ openshift_cfme_project }}:default"
-# All the required exports
-openshift_cfme_pv_exports:
- - miq-pv01
- - miq-pv02
- - miq-pv03
-# PV template files and their created object names
-openshift_cfme_pv_data:
- - pv_name: miq-pv01
- pv_template: miq-pv-db.yaml
- pv_label: CFME DB PV
- - pv_name: miq-pv02
- pv_template: miq-pv-region.yaml
- pv_label: CFME Region PV
- - pv_name: miq-pv03
- pv_template: miq-pv-server.yaml
- pv_label: CFME Server PV
-
-# Tuning parameter to use more than 5 images at once from an ImageStream
-openshift_cfme_maxImagesBulkImportedPerRepository: 100
-# TODO: Refactor '_install_app' variable. This is just for testing but
-# maybe in the future it should control the entire yes/no for CFME.
-#
-# Whether or not the manageiq app should be initialized ('oc new-app
-# --template=manageiq'). If False, everything UP TO 'new-app' is run.
-openshift_cfme_install_app: False
-# Docker image to pull
-openshift_cfme_application_img_name: "{{ 'registry.access.redhat.com/cloudforms45/cfme-openshift-app' if openshift_deployment_type == 'openshift-enterprise' else 'docker.io/manageiq/manageiq-pods' }}"
-openshift_cfme_postgresql_img_name: "{{ 'registry.access.redhat.com/cloudforms45/cfme-openshift-postgresql' if openshift_deployment_type == 'openshift-enterprise' else 'docker.io/manageiq/manageiq-pods' }}"
-openshift_cfme_memcached_img_name: "{{ 'registry.access.redhat.com/cloudforms45/cfme-openshift-memcached' if openshift_deployment_type == 'openshift-enterprise' else 'docker.io/manageiq/manageiq-pods' }}"
-openshift_cfme_application_img_tag: "{{ 'latest' if openshift_deployment_type == 'openshift-enterprise' else 'app-latest-fine' }}"
-openshift_cfme_memcached_img_tag: "{{ 'latest' if openshift_deployment_type == 'openshift-enterprise' else 'memcached-latest-fine' }}"
-openshift_cfme_postgresql_img_tag: "{{ 'latest' if openshift_deployment_type == 'openshift-enterprise' else 'postgresql-latest-fine' }}"
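The image name/tag conditionals above all follow the same pattern; a small illustrative playbook showing how one of them resolves (the values are placeholders):

```yaml
- hosts: localhost
  gather_facts: no
  vars:
    openshift_deployment_type: origin   # placeholder
  tasks:
  - debug:
      msg: "{{ 'registry.access.redhat.com/cloudforms45/cfme-openshift-app' if openshift_deployment_type == 'openshift-enterprise' else 'docker.io/manageiq/manageiq-pods' }}"
    # prints docker.io/manageiq/manageiq-pods for origin deployments
```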
diff --git a/roles/openshift_cfme/files/miq-template.yaml b/roles/openshift_cfme/files/miq-template.yaml
deleted file mode 100644
index 8f0d2af38..000000000
--- a/roles/openshift_cfme/files/miq-template.yaml
+++ /dev/null
@@ -1,566 +0,0 @@
----
-path: /tmp/miq-template-out
-data:
- apiVersion: v1
- kind: Template
- labels:
- template: manageiq
- metadata:
- name: manageiq
- annotations:
- description: "ManageIQ appliance with persistent storage"
- tags: "instant-app,manageiq,miq"
- iconClass: "icon-rails"
- objects:
- - apiVersion: v1
- kind: Secret
- metadata:
- name: "${NAME}-secrets"
- stringData:
- pg-password: "${DATABASE_PASSWORD}"
- - apiVersion: v1
- kind: Service
- metadata:
- annotations:
- description: "Exposes and load balances ManageIQ pods"
- service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]'
- name: ${NAME}
- spec:
- clusterIP: None
- ports:
- - name: http
- port: 80
- protocol: TCP
- targetPort: 80
- - name: https
- port: 443
- protocol: TCP
- targetPort: 443
- selector:
- name: ${NAME}
- - apiVersion: v1
- kind: Route
- metadata:
- name: ${NAME}
- spec:
- host: ${APPLICATION_DOMAIN}
- port:
- targetPort: https
- tls:
- termination: passthrough
- to:
- kind: Service
- name: ${NAME}
- - apiVersion: v1
- kind: ImageStream
- metadata:
- name: miq-app
- annotations:
- description: "Keeps track of the ManageIQ image changes"
- spec:
- dockerImageRepository: "${APPLICATION_IMG_NAME}"
- - apiVersion: v1
- kind: ImageStream
- metadata:
- name: miq-postgresql
- annotations:
- description: "Keeps track of the PostgreSQL image changes"
- spec:
- dockerImageRepository: "${POSTGRESQL_IMG_NAME}"
- - apiVersion: v1
- kind: ImageStream
- metadata:
- name: miq-memcached
- annotations:
- description: "Keeps track of the Memcached image changes"
- spec:
- dockerImageRepository: "${MEMCACHED_IMG_NAME}"
- - apiVersion: v1
- kind: PersistentVolumeClaim
- metadata:
- name: "${NAME}-${DATABASE_SERVICE_NAME}"
- spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: ${DATABASE_VOLUME_CAPACITY}
- - apiVersion: v1
- kind: PersistentVolumeClaim
- metadata:
- name: "${NAME}-region"
- spec:
- accessModes:
- - ReadWriteOnce
- resources:
- requests:
- storage: ${APPLICATION_REGION_VOLUME_CAPACITY}
- - apiVersion: apps/v1beta1
- kind: "StatefulSet"
- metadata:
- name: ${NAME}
- annotations:
- description: "Defines how to deploy the ManageIQ appliance"
- spec:
- serviceName: "${NAME}"
- replicas: "${APPLICATION_REPLICA_COUNT}"
- template:
- metadata:
- labels:
- name: ${NAME}
- name: ${NAME}
- spec:
- containers:
- - name: manageiq
- image: "${APPLICATION_IMG_NAME}:${APPLICATION_IMG_TAG}"
- livenessProbe:
- tcpSocket:
- port: 443
- initialDelaySeconds: 480
- timeoutSeconds: 3
- readinessProbe:
- httpGet:
- path: /
- port: 443
- scheme: HTTPS
- initialDelaySeconds: 200
- timeoutSeconds: 3
- ports:
- - containerPort: 80
- protocol: TCP
- - containerPort: 443
- protocol: TCP
- securityContext:
- privileged: true
- volumeMounts:
- -
- name: "${NAME}-server"
- mountPath: "/persistent"
- -
- name: "${NAME}-region"
- mountPath: "/persistent-region"
- env:
- -
- name: "APPLICATION_INIT_DELAY"
- value: "${APPLICATION_INIT_DELAY}"
- -
- name: "DATABASE_SERVICE_NAME"
- value: "${DATABASE_SERVICE_NAME}"
- -
- name: "DATABASE_REGION"
- value: "${DATABASE_REGION}"
- -
- name: "MEMCACHED_SERVICE_NAME"
- value: "${MEMCACHED_SERVICE_NAME}"
- -
- name: "POSTGRESQL_USER"
- value: "${DATABASE_USER}"
- -
- name: "POSTGRESQL_PASSWORD"
- valueFrom:
- secretKeyRef:
- name: "${NAME}-secrets"
- key: "pg-password"
- -
- name: "POSTGRESQL_DATABASE"
- value: "${DATABASE_NAME}"
- -
- name: "POSTGRESQL_MAX_CONNECTIONS"
- value: "${POSTGRESQL_MAX_CONNECTIONS}"
- -
- name: "POSTGRESQL_SHARED_BUFFERS"
- value: "${POSTGRESQL_SHARED_BUFFERS}"
- resources:
- requests:
- memory: "${APPLICATION_MEM_REQ}"
- cpu: "${APPLICATION_CPU_REQ}"
- limits:
- memory: "${APPLICATION_MEM_LIMIT}"
- lifecycle:
- preStop:
- exec:
- command:
- - /opt/manageiq/container-scripts/sync-pv-data
- volumes:
- -
- name: "${NAME}-region"
- persistentVolumeClaim:
- claimName: ${NAME}-region
- volumeClaimTemplates:
- - metadata:
- name: "${NAME}-server"
- annotations:
- # Uncomment this if using dynamic volume provisioning.
- # https://docs.openshift.org/latest/install_config/persistent_storage/dynamically_provisioning_pvs.html
- # volume.alpha.kubernetes.io/storage-class: anything
- spec:
- accessModes: [ ReadWriteOnce ]
- resources:
- requests:
- storage: "${APPLICATION_VOLUME_CAPACITY}"
- - apiVersion: v1
- kind: "Service"
- metadata:
- name: "${MEMCACHED_SERVICE_NAME}"
- annotations:
- description: "Exposes the memcached server"
- spec:
- ports:
- -
- name: "memcached"
- port: 11211
- targetPort: 11211
- selector:
- name: "${MEMCACHED_SERVICE_NAME}"
- - apiVersion: v1
- kind: "DeploymentConfig"
- metadata:
- name: "${MEMCACHED_SERVICE_NAME}"
- annotations:
- description: "Defines how to deploy memcached"
- spec:
- strategy:
- type: "Recreate"
- triggers:
- -
- type: "ImageChange"
- imageChangeParams:
- automatic: true
- containerNames:
- - "memcached"
- from:
- kind: "ImageStreamTag"
- name: "miq-memcached:${MEMCACHED_IMG_TAG}"
- -
- type: "ConfigChange"
- replicas: 1
- selector:
- name: "${MEMCACHED_SERVICE_NAME}"
- template:
- metadata:
- name: "${MEMCACHED_SERVICE_NAME}"
- labels:
- name: "${MEMCACHED_SERVICE_NAME}"
- spec:
- volumes: []
- containers:
- -
- name: "memcached"
- image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}"
- ports:
- -
- containerPort: 11211
- readinessProbe:
- timeoutSeconds: 1
- initialDelaySeconds: 5
- tcpSocket:
- port: 11211
- livenessProbe:
- timeoutSeconds: 1
- initialDelaySeconds: 30
- tcpSocket:
- port: 11211
- volumeMounts: []
- env:
- -
- name: "MEMCACHED_MAX_MEMORY"
- value: "${MEMCACHED_MAX_MEMORY}"
- -
- name: "MEMCACHED_MAX_CONNECTIONS"
- value: "${MEMCACHED_MAX_CONNECTIONS}"
- -
- name: "MEMCACHED_SLAB_PAGE_SIZE"
- value: "${MEMCACHED_SLAB_PAGE_SIZE}"
- resources:
- requests:
- memory: "${MEMCACHED_MEM_REQ}"
- cpu: "${MEMCACHED_CPU_REQ}"
- limits:
- memory: "${MEMCACHED_MEM_LIMIT}"
- - apiVersion: v1
- kind: "Service"
- metadata:
- name: "${DATABASE_SERVICE_NAME}"
- annotations:
- description: "Exposes the database server"
- spec:
- ports:
- -
- name: "postgresql"
- port: 5432
- targetPort: 5432
- selector:
- name: "${DATABASE_SERVICE_NAME}"
- - apiVersion: v1
- kind: "DeploymentConfig"
- metadata:
- name: "${DATABASE_SERVICE_NAME}"
- annotations:
- description: "Defines how to deploy the database"
- spec:
- strategy:
- type: "Recreate"
- triggers:
- -
- type: "ImageChange"
- imageChangeParams:
- automatic: true
- containerNames:
- - "postgresql"
- from:
- kind: "ImageStreamTag"
- name: "miq-postgresql:${POSTGRESQL_IMG_TAG}"
- -
- type: "ConfigChange"
- replicas: 1
- selector:
- name: "${DATABASE_SERVICE_NAME}"
- template:
- metadata:
- name: "${DATABASE_SERVICE_NAME}"
- labels:
- name: "${DATABASE_SERVICE_NAME}"
- spec:
- volumes:
- -
- name: "miq-pgdb-volume"
- persistentVolumeClaim:
- claimName: "${NAME}-${DATABASE_SERVICE_NAME}"
- containers:
- -
- name: "postgresql"
- image: "${POSTGRESQL_IMG_NAME}:${POSTGRESQL_IMG_TAG}"
- ports:
- -
- containerPort: 5432
- readinessProbe:
- timeoutSeconds: 1
- initialDelaySeconds: 15
- exec:
- command:
- - "/bin/sh"
- - "-i"
- - "-c"
- - "psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1'"
- livenessProbe:
- timeoutSeconds: 1
- initialDelaySeconds: 60
- tcpSocket:
- port: 5432
- volumeMounts:
- -
- name: "miq-pgdb-volume"
- mountPath: "/var/lib/pgsql/data"
- env:
- -
- name: "POSTGRESQL_USER"
- value: "${DATABASE_USER}"
- -
- name: "POSTGRESQL_PASSWORD"
- valueFrom:
- secretKeyRef:
- name: "${NAME}-secrets"
- key: "pg-password"
- -
- name: "POSTGRESQL_DATABASE"
- value: "${DATABASE_NAME}"
- -
- name: "POSTGRESQL_MAX_CONNECTIONS"
- value: "${POSTGRESQL_MAX_CONNECTIONS}"
- -
- name: "POSTGRESQL_SHARED_BUFFERS"
- value: "${POSTGRESQL_SHARED_BUFFERS}"
- resources:
- requests:
- memory: "${POSTGRESQL_MEM_REQ}"
- cpu: "${POSTGRESQL_CPU_REQ}"
- limits:
- memory: "${POSTGRESQL_MEM_LIMIT}"
-
- parameters:
- -
- name: "NAME"
- displayName: Name
- required: true
- description: "The name assigned to all of the frontend objects defined in this template."
- value: manageiq
- -
- name: "DATABASE_SERVICE_NAME"
- displayName: "PostgreSQL Service Name"
- required: true
- description: "The name of the OpenShift Service exposed for the PostgreSQL container."
- value: "postgresql"
- -
- name: "DATABASE_USER"
- displayName: "PostgreSQL User"
- required: true
- description: "PostgreSQL user that will access the database."
- value: "root"
- -
- name: "DATABASE_PASSWORD"
- displayName: "PostgreSQL Password"
- required: true
- description: "Password for the PostgreSQL user."
- from: "[a-zA-Z0-9]{8}"
- generate: expression
- -
- name: "DATABASE_NAME"
- required: true
- displayName: "PostgreSQL Database Name"
- description: "Name of the PostgreSQL database accessed."
- value: "vmdb_production"
- -
- name: "DATABASE_REGION"
- required: true
- displayName: "Application Database Region"
- description: "Database region that will be used for application."
- value: "0"
- -
- name: "MEMCACHED_SERVICE_NAME"
- required: true
- displayName: "Memcached Service Name"
- description: "The name of the OpenShift Service exposed for the Memcached container."
- value: "memcached"
- -
- name: "MEMCACHED_MAX_MEMORY"
- displayName: "Memcached Max Memory"
- description: "Memcached maximum memory for memcached object storage in MB."
- value: "64"
- -
- name: "MEMCACHED_MAX_CONNECTIONS"
- displayName: "Memcached Max Connections"
- description: "Memcached maximum number of connections allowed."
- value: "1024"
- -
- name: "MEMCACHED_SLAB_PAGE_SIZE"
- displayName: "Memcached Slab Page Size"
- description: "Memcached size of each slab page."
- value: "1m"
- -
- name: "POSTGRESQL_MAX_CONNECTIONS"
- displayName: "PostgreSQL Max Connections"
- description: "PostgreSQL maximum number of database connections allowed."
- value: "100"
- -
- name: "POSTGRESQL_SHARED_BUFFERS"
- displayName: "PostgreSQL Shared Buffer Amount"
- description: "Amount of memory dedicated for PostgreSQL shared memory buffers."
- value: "256MB"
- -
- name: "APPLICATION_CPU_REQ"
- displayName: "Application Min CPU Requested"
- required: true
- description: "Minimum amount of CPU time the Application container will need (expressed in millicores)."
- value: "1000m"
- -
- name: "POSTGRESQL_CPU_REQ"
- displayName: "PostgreSQL Min CPU Requested"
- required: true
- description: "Minimum amount of CPU time the PostgreSQL container will need (expressed in millicores)."
- value: "500m"
- -
- name: "MEMCACHED_CPU_REQ"
- displayName: "Memcached Min CPU Requested"
- required: true
- description: "Minimum amount of CPU time the Memcached container will need (expressed in millicores)."
- value: "200m"
- -
- name: "APPLICATION_MEM_REQ"
- displayName: "Application Min RAM Requested"
- required: true
- description: "Minimum amount of memory the Application container will need."
- value: "6144Mi"
- -
- name: "POSTGRESQL_MEM_REQ"
- displayName: "PostgreSQL Min RAM Requested"
- required: true
- description: "Minimum amount of memory the PostgreSQL container will need."
- value: "1024Mi"
- -
- name: "MEMCACHED_MEM_REQ"
- displayName: "Memcached Min RAM Requested"
- required: true
- description: "Minimum amount of memory the Memcached container will need."
- value: "64Mi"
- -
- name: "APPLICATION_MEM_LIMIT"
- displayName: "Application Max RAM Limit"
- required: true
- description: "Maximum amount of memory the Application container can consume."
- value: "16384Mi"
- -
- name: "POSTGRESQL_MEM_LIMIT"
- displayName: "PostgreSQL Max RAM Limit"
- required: true
- description: "Maximum amount of memory the PostgreSQL container can consume."
- value: "8192Mi"
- -
- name: "MEMCACHED_MEM_LIMIT"
- displayName: "Memcached Max RAM Limit"
- required: true
- description: "Maximum amount of memory the Memcached container can consume."
- value: "256Mi"
- -
- name: "POSTGRESQL_IMG_NAME"
- displayName: "PostgreSQL Image Name"
- description: "This is the PostgreSQL image name requested to deploy."
- value: "docker.io/manageiq/manageiq-pods"
- -
- name: "POSTGRESQL_IMG_TAG"
- displayName: "PostgreSQL Image Tag"
- description: "This is the PostgreSQL image tag/version requested to deploy."
- value: "postgresql-latest-fine"
- -
- name: "MEMCACHED_IMG_NAME"
- displayName: "Memcached Image Name"
- description: "This is the Memcached image name requested to deploy."
- value: "docker.io/manageiq/manageiq-pods"
- -
- name: "MEMCACHED_IMG_TAG"
- displayName: "Memcached Image Tag"
- description: "This is the Memcached image tag/version requested to deploy."
- value: "memcached-latest-fine"
- -
- name: "APPLICATION_IMG_NAME"
- displayName: "Application Image Name"
- description: "This is the Application image name requested to deploy."
- value: "docker.io/manageiq/manageiq-pods"
- -
- name: "APPLICATION_IMG_TAG"
- displayName: "Application Image Tag"
- description: "This is the Application image tag/version requested to deploy."
- value: "app-latest-fine"
- -
- name: "APPLICATION_DOMAIN"
- displayName: "Application Hostname"
- description: "The exposed hostname that will route to the application service, if left blank a value will be defaulted."
- value: ""
- -
- name: "APPLICATION_REPLICA_COUNT"
- displayName: "Application Replica Count"
- description: "This is the number of Application replicas requested to deploy."
- value: "1"
- -
- name: "APPLICATION_INIT_DELAY"
- displayName: "Application Init Delay"
- required: true
- description: "Delay in seconds before we attempt to initialize the application."
- value: "15"
- -
- name: "APPLICATION_VOLUME_CAPACITY"
- displayName: "Application Volume Capacity"
- required: true
- description: "Volume space available for application data."
- value: "5Gi"
- -
- name: "APPLICATION_REGION_VOLUME_CAPACITY"
- displayName: "Application Region Volume Capacity"
- required: true
- description: "Volume space available for region application data."
- value: "5Gi"
- -
- name: "DATABASE_VOLUME_CAPACITY"
- displayName: "Database Volume Capacity"
- required: true
- description: "Volume space available for database."
- value: "15Gi"
diff --git a/roles/openshift_cfme/files/openshift_cfme.exports b/roles/openshift_cfme/files/openshift_cfme.exports
deleted file mode 100644
index 5457d41fc..000000000
--- a/roles/openshift_cfme/files/openshift_cfme.exports
+++ /dev/null
@@ -1,3 +0,0 @@
-/exports/miq-pv01 *(rw,no_root_squash,no_wdelay)
-/exports/miq-pv02 *(rw,no_root_squash,no_wdelay)
-/exports/miq-pv03 *(rw,no_root_squash,no_wdelay)
diff --git a/roles/openshift_cfme/handlers/main.yml b/roles/openshift_cfme/handlers/main.yml
deleted file mode 100644
index 7e90b09a4..000000000
--- a/roles/openshift_cfme/handlers/main.yml
+++ /dev/null
@@ -1,37 +0,0 @@
----
-######################################################################
-# NOTE: These are duplicated from roles/openshift_master/handlers/main.yml
-#
-# TODO: Use the consolidated 'openshift_handlers' role once it's ready
-# See: https://github.com/openshift/openshift-ansible/pull/4041#discussion_r118770782
-######################################################################
-
-- name: restart master api
- systemd: name={{ openshift.common.service_type }}-master-api state=restarted
- when: (not (master_api_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
- notify: Verify API Server
-
-- name: restart master controllers
- systemd: name={{ openshift.common.service_type }}-master-controllers state=restarted
- when: (not (master_controllers_service_status_changed | default(false) | bool)) and openshift.master.cluster_method == 'native'
-
-- name: Verify API Server
- # Using curl here since the uri module requires python-httplib2 and
- # wait_for port doesn't provide health information.
- command: >
- curl --silent --tlsv1.2
- {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
- --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
- {% else %}
- --cacert {{ openshift.common.config_base }}/master/ca.crt
- {% endif %}
- {{ openshift.master.api_url }}/healthz/ready
- args:
- # Disables the following warning:
- # Consider using get_url or uri module rather than running curl
- warn: no
- register: api_available_output
- until: api_available_output.stdout == 'ok'
- retries: 120
- delay: 1
- changed_when: false
diff --git a/roles/openshift_cfme/img/CFMEBasicDeployment.png b/roles/openshift_cfme/img/CFMEBasicDeployment.png
deleted file mode 100644
index a89c1e325..000000000
--- a/roles/openshift_cfme/img/CFMEBasicDeployment.png
+++ /dev/null
Binary files differ
diff --git a/roles/openshift_cfme/tasks/create_pvs.yml b/roles/openshift_cfme/tasks/create_pvs.yml
deleted file mode 100644
index 7fa7d3997..000000000
--- a/roles/openshift_cfme/tasks/create_pvs.yml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-# Check for existence and then conditionally:
-# - evaluate templates
-# - PVs
-#
-# These tasks idempotently create required CFME PV objects. Do not
-# call this file directly. This file is intended to be run as an
-# include that has a 'with_items' attached to it. Hence the use below
-# of variables like "{{ item.pv_label }}"
-
-- name: "Check if the {{ item.pv_label }} template has been created already"
- oc_obj:
- namespace: "{{ openshift_cfme_project }}"
- state: list
- kind: pv
- name: "{{ item.pv_name }}"
- register: miq_pv_check
-
-# Skip all of this if the PV already exists
-- block:
- - name: "Ensure the {{ item.pv_label }} template is evaluated"
- template:
- src: "{{ item.pv_template }}.j2"
- dest: "{{ template_dir }}/{{ item.pv_template }}"
-
- - name: "Ensure {{ item.pv_label }} is created"
- oc_obj:
- namespace: "{{ openshift_cfme_project }}"
- kind: pv
- name: "{{ item.pv_name }}"
- state: present
- delete_after: True
- files:
- - "{{ template_dir }}/{{ item.pv_template }}"
- when:
- - not miq_pv_check.results.results.0
diff --git a/roles/openshift_cfme/tasks/main.yml b/roles/openshift_cfme/tasks/main.yml
deleted file mode 100644
index 74ae16d91..000000000
--- a/roles/openshift_cfme/tasks/main.yml
+++ /dev/null
@@ -1,117 +0,0 @@
----
-######################################################################
-# Users, projects, and privileges
-
-- name: Ensure the CFME user exists
- oc_user:
- state: present
- username: "{{ openshift_cfme_user }}"
-
-- name: Ensure the CFME namespace exists with CFME user as admin
- oc_project:
- state: present
- name: "{{ openshift_cfme_project }}"
- display_name: "{{ openshift_cfme_project_description }}"
- admin: "{{ openshift_cfme_user }}"
-
-- name: Ensure the CFME namespace service account is privileged
- oc_adm_policy_user:
- namespace: "{{ openshift_cfme_project }}"
- user: "{{ openshift_cfme_service_account }}"
- resource_kind: scc
- resource_name: privileged
- state: present
-
-######################################################################
-# NFS
-# In the case that we are not running on a cloud provider, volumes must be statically provisioned
-
-- include: nfs.yml
- when: not (openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce'))
-
-######################################################################
-# CFME App Template
-#
-# Note, this is different from the create_pvs.yml tasks in that the
-# application template does not require any jinja2 evaluation.
-#
-# TODO: Handle the case where the server template is updated in
-# openshift-ansible and the change needs to be landed on the managed
-# cluster.
-
-- name: Check if the CFME Server template has been created already
- oc_obj:
- namespace: "{{ openshift_cfme_project }}"
- state: list
- kind: template
- name: manageiq
- register: miq_server_check
-
-- name: Copy over CFME Server template
- copy:
- src: miq-template.yaml
- dest: "{{ template_dir }}/miq-template.yaml"
-
-- name: Ensure the server template was read from disk
- debug:
- var: r_openshift_cfme_miq_template_content
-
-- name: Ensure CFME Server Template exists
- oc_obj:
- namespace: "{{ openshift_cfme_project }}"
- kind: template
- name: "manageiq"
- state: present
- content: "{{ r_openshift_cfme_miq_template_content }}"
-
-######################################################################
-# Let's do this
-
-- name: Ensure the CFME Server is created
- oc_process:
- namespace: "{{ openshift_cfme_project }}"
- template_name: manageiq
- create: True
- params:
- APPLICATION_IMG_NAME: "{{ openshift_cfme_application_img_name }}"
- POSTGRESQL_IMG_NAME: "{{ openshift_cfme_postgresql_img_name }}"
- MEMCACHED_IMG_NAME: "{{ openshift_cfme_memcached_img_name }}"
- APPLICATION_IMG_TAG: "{{ openshift_cfme_application_img_tag }}"
- POSTGRESQL_IMG_TAG: "{{ openshift_cfme_postgresql_img_tag }}"
- MEMCACHED_IMG_TAG: "{{ openshift_cfme_memcached_img_tag }}"
- register: cfme_new_app_process
- run_once: True
- when:
- # User said to install CFME in their inventory
- - openshift_cfme_install_app | bool
- # # The server app doesn't exist already
- # - not miq_server_check.results.results.0
-
-- debug:
- var: cfme_new_app_process
-
-######################################################################
-# Various cleanup steps
-
-# TODO: Not sure what to do about this right now. Might be able to
-# just delete it? This currently warns about "Unable to find
-# '<TEMP_DIR>' in expected paths."
-- name: Ensure the temporary PV/App templates are erased
- file:
- path: "{{ item }}"
- state: absent
- with_fileglob:
- - "{{ template_dir }}/*.yaml"
-
-- name: Ensure the temporary PV/app template directory is erased
- file:
- path: "{{ template_dir }}"
- state: absent
-
-######################################################################
-
-- name: Status update
- debug:
- msg: >
- CFME has been deployed. Note that there will be a delay before
- it is fully initialized.
diff --git a/roles/openshift_cfme/tasks/nfs.yml b/roles/openshift_cfme/tasks/nfs.yml
deleted file mode 100644
index ca04628a8..000000000
--- a/roles/openshift_cfme/tasks/nfs.yml
+++ /dev/null
@@ -1,51 +0,0 @@
----
-# Tasks to statically provision NFS volumes
-# Include if not using dynamic volume provisioning
-
-- name: Set openshift_cfme_nfs_server fact
- when: openshift_cfme_nfs_server is not defined
- set_fact:
- # Hostname/IP of the NFS server. Currently defaults to first master
- openshift_cfme_nfs_server: "{{ oo_nfs_to_config.0 }}"
-
-- name: Ensure the /exports/ directory exists
- file:
- path: /exports/
- state: directory
- mode: 0755
- owner: root
- group: root
-
-- name: Ensure the miq-pv0X export directories exist
- file:
- path: "/exports/{{ item }}"
- state: directory
- mode: 0775
- owner: root
- group: root
- with_items: "{{ openshift_cfme_pv_exports }}"
-
-- name: Ensure the NFS exports for CFME PVs exist
- copy:
- src: openshift_cfme.exports
- dest: /etc/exports.d/openshift_cfme.exports
- register: nfs_exports_updated
-
-- name: Ensure the NFS export table is refreshed if exports were added
- command: exportfs -ar
- when:
- - nfs_exports_updated.changed
-
-
-######################################################################
-# Create the required CFME PVs. Check out these online docs if you
-# need a refresher on includes looping with items:
-# * http://docs.ansible.com/ansible/playbooks_loops.html#loops-and-includes-in-2-0
-# * http://stackoverflow.com/a/35128533
-#
-# TODO: Handle the case where a PV template is updated in
-# openshift-ansible and the change needs to be landed on the managed
-# cluster.
-
-- include: create_pvs.yml
- with_items: "{{ openshift_cfme_pv_data }}"
diff --git a/roles/openshift_cfme/tasks/tune_masters.yml b/roles/openshift_cfme/tasks/tune_masters.yml
deleted file mode 100644
index 02b0f10bf..000000000
--- a/roles/openshift_cfme/tasks/tune_masters.yml
+++ /dev/null
@@ -1,12 +0,0 @@
----
-- name: Ensure bulk image import limit is tuned
- yedit:
- src: /etc/origin/master/master-config.yaml
- key: 'imagePolicyConfig.maxImagesBulkImportedPerRepository'
- value: "{{ openshift_cfme_maxImagesBulkImportedPerRepository | int() }}"
- state: present
- backup: True
- notify:
- - restart master
-
-- meta: flush_handlers
diff --git a/roles/openshift_cfme/tasks/uninstall.yml b/roles/openshift_cfme/tasks/uninstall.yml
deleted file mode 100644
index 406b59364..000000000
--- a/roles/openshift_cfme/tasks/uninstall.yml
+++ /dev/null
@@ -1,46 +0,0 @@
----
-- include_role:
- name: lib_openshift
-
-- name: Uninstall CFME - ManageIQ
- debug:
- msg: Uninstalling Cloudforms Management Engine - ManageIQ
-
-- name: Ensure the CFME project is removed
- oc_project:
- state: absent
- name: "{{ openshift_cfme_project }}"
-
-- name: Ensure the CFME template is removed
- oc_obj:
- namespace: "{{ openshift_cfme_project }}"
- state: absent
- kind: template
- name: manageiq
-
-- name: Ensure the CFME PVs are removed
- oc_obj:
- state: absent
- all_namespaces: True
- kind: pv
- name: "{{ item }}"
- with_items: "{{ openshift_cfme_pv_exports }}"
- when: not (openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce'))
-
-- name: Ensure the CFME user is removed
- oc_user:
- state: absent
- username: "{{ openshift_cfme_user }}"
-
-- name: Ensure the CFME NFS Exports are removed
- file:
- path: /etc/exports.d/openshift_cfme.exports
- state: absent
- register: nfs_exports_removed
- when: not (openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce'))
-
-- name: Ensure the NFS export table is refreshed if exports were removed
- command: exportfs -ar
- when:
- - nfs_exports_removed.changed
- - not (openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce'))
diff --git a/roles/openshift_cfme/templates/miq-pv-db.yaml.j2 b/roles/openshift_cfme/templates/miq-pv-db.yaml.j2
deleted file mode 100644
index 280f3e97a..000000000
--- a/roles/openshift_cfme/templates/miq-pv-db.yaml.j2
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: v1
-kind: PersistentVolume
-metadata:
- name: miq-pv01
-spec:
- capacity:
- storage: 15Gi
- accessModes:
- - ReadWriteOnce
- nfs:
- path: {{ openshift_cfme_nfs_directory }}/miq-pv01
- server: {{ openshift_cfme_nfs_server }}
- persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_cfme/templates/miq-pv-region.yaml.j2 b/roles/openshift_cfme/templates/miq-pv-region.yaml.j2
deleted file mode 100644
index fe80dffa5..000000000
--- a/roles/openshift_cfme/templates/miq-pv-region.yaml.j2
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: v1
-kind: PersistentVolume
-metadata:
- name: miq-pv02
-spec:
- capacity:
- storage: 5Gi
- accessModes:
- - ReadWriteOnce
- nfs:
- path: {{ openshift_cfme_nfs_directory }}/miq-pv02
- server: {{ openshift_cfme_nfs_server }}
- persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_cfme/templates/miq-pv-server.yaml.j2 b/roles/openshift_cfme/templates/miq-pv-server.yaml.j2
deleted file mode 100644
index f84b67ea9..000000000
--- a/roles/openshift_cfme/templates/miq-pv-server.yaml.j2
+++ /dev/null
@@ -1,13 +0,0 @@
-apiVersion: v1
-kind: PersistentVolume
-metadata:
- name: miq-pv03
-spec:
- capacity:
- storage: 5Gi
- accessModes:
- - ReadWriteOnce
- nfs:
- path: {{ openshift_cfme_nfs_directory }}/miq-pv03
- server: {{ openshift_cfme_nfs_server }}
- persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_cli/tasks/main.yml b/roles/openshift_cli/tasks/main.yml
index 9e61805f9..14d8a3325 100644
--- a/roles/openshift_cli/tasks/main.yml
+++ b/roles/openshift_cli/tasks/main.yml
@@ -1,6 +1,9 @@
---
- set_fact:
- l_use_crio: "{{ openshift_use_crio | default(false) }}"
+ l_use_crio_only: "{{ openshift_use_crio_only | default(false) }}"
+ l_is_system_container_image: "{{ openshift_use_master_system_container | default(openshift_use_system_containers | default(false)) | bool }}"
+- set_fact:
+ l_use_cli_atomic_image: "{{ l_use_crio_only or l_is_system_container_image }}"
- name: Install clients
package: name={{ openshift.common.service_type }}-clients state=present
@@ -20,23 +23,23 @@
backend: "docker"
when:
- openshift.common.is_containerized | bool
- - not l_use_crio
+ - not l_use_cli_atomic_image | bool
- block:
- name: Pull CLI Image
command: >
- atomic pull --storage ostree {{ openshift.common.system_images_registry }}/{{ openshift.common.cli_image }}:{{ openshift_image_tag }}
+ atomic pull --storage ostree {{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.common.cli_image }}:{{ openshift_image_tag }}
register: pull_result
changed_when: "'Pulling layer' in pull_result.stdout"
- name: Copy client binaries/symlinks out of CLI image for use on the host
openshift_container_binary_sync:
- image: "{{ openshift.common.system_images_registry }}/{{ openshift.common.cli_image }}"
+ image: "{{ '' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.common.cli_image }}"
tag: "{{ openshift_image_tag }}"
backend: "atomic"
when:
- openshift.common.is_containerized | bool
- - l_use_crio
+ - l_use_cli_atomic_image | bool
- name: Reload facts to pick up installed OpenShift version
openshift_facts:
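The registry-prefix expression introduced above special-cases the literal value `docker`; a hedged, standalone sketch of how it evaluates (the variable values are illustrative):

```yaml
- hosts: localhost
  gather_facts: no
  vars:
    system_images_registry: docker        # or e.g. registry.example.com
    cli_image: openshift/origin           # illustrative
    openshift_image_tag: v3.7.0           # illustrative
  tasks:
  - debug:
      msg: "{{ ('docker:' if system_images_registry == 'docker' else system_images_registry + '/') ~ cli_image ~ ':' ~ openshift_image_tag }}"
    # -> "docker:openshift/origin:v3.7.0" here; with a real registry the
    #    result is "<registry>/openshift/origin:v3.7.0"
```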
diff --git a/roles/openshift_default_storage_class/defaults/main.yml b/roles/openshift_default_storage_class/defaults/main.yml
index bdece7640..014c06641 100644
--- a/roles/openshift_default_storage_class/defaults/main.yml
+++ b/roles/openshift_default_storage_class/defaults/main.yml
@@ -13,6 +13,12 @@ openshift_storageclass_defaults:
parameters:
type: pd-standard
+ openstack:
+ name: standard
+ provisioner: cinder
+ parameters:
+ fstype: xfs
+
openshift_storageclass_default: "true"
openshift_storageclass_name: "{{ openshift_storageclass_defaults[openshift_cloudprovider_kind]['name'] }}"
openshift_storageclass_provisioner: "{{ openshift_storageclass_defaults[openshift_cloudprovider_kind]['provisioner'] }}"
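With the new `openstack` entry, setting `openshift_cloudprovider_kind=openstack` makes the lookups above resolve as sketched here (a self-contained, illustrative snippet):

```yaml
- hosts: localhost
  gather_facts: no
  vars:
    openshift_cloudprovider_kind: openstack
    openshift_storageclass_defaults:
      openstack:
        name: standard
        provisioner: cinder
        parameters:
          fstype: xfs
  tasks:
  - debug:
      msg: "{{ openshift_storageclass_defaults[openshift_cloudprovider_kind]['name'] }} uses {{ openshift_storageclass_defaults[openshift_cloudprovider_kind]['provisioner'] }}"
    # -> "standard uses cinder"
```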
diff --git a/roles/openshift_default_storage_class/tasks/main.yml b/roles/openshift_default_storage_class/tasks/main.yml
index 172e2ac25..281ec8ed5 100644
--- a/roles/openshift_default_storage_class/tasks/main.yml
+++ b/roles/openshift_default_storage_class/tasks/main.yml
@@ -1,5 +1,5 @@
---
-# Install default storage classes in GCE & AWS
+# Install default storage classes in GCE, AWS & OpenStack
- name: Ensure storageclass object
oc_storageclass:
name: "{{ openshift_storageclass_name }}"
diff --git a/roles/openshift_etcd_facts/vars/main.yml b/roles/openshift_etcd_facts/vars/main.yml
index b3ecd57a6..0c072b64a 100644
--- a/roles/openshift_etcd_facts/vars/main.yml
+++ b/roles/openshift_etcd_facts/vars/main.yml
@@ -6,6 +6,5 @@ etcd_ip: "{{ openshift.common.ip }}"
etcd_cert_subdir: "etcd-{{ openshift.common.hostname }}"
etcd_cert_prefix:
etcd_cert_config_dir: "/etc/etcd"
-etcd_system_container_cert_config_dir: /var/lib/etcd/etcd.etcd/etc
etcd_peer_url_scheme: https
etcd_url_scheme: https
diff --git a/roles/openshift_examples/examples-sync.sh b/roles/openshift_examples/examples-sync.sh
index ca3f219d8..1a14c32f5 100755
--- a/roles/openshift_examples/examples-sync.sh
+++ b/roles/openshift_examples/examples-sync.sh
@@ -5,7 +5,7 @@
#
# This script should be run from openshift-ansible/roles/openshift_examples
-XPAAS_VERSION=ose-v1.4.1
+XPAAS_VERSION=ose-v1.4.5
ORIGIN_VERSION=${1:-v3.7}
RHAMP_TAG=2.0.0.GA
EXAMPLES_BASE=$(pwd)/files/examples/${ORIGIN_VERSION}
diff --git a/roles/openshift_examples/files/examples/v3.7/db-templates/mariadb-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.7/db-templates/mariadb-ephemeral-template.json
index 6500ed0d3..5e7585eeb 100644
--- a/roles/openshift_examples/files/examples/v3.7/db-templates/mariadb-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/db-templates/mariadb-ephemeral-template.json
@@ -8,10 +8,10 @@
"description": "MariaDB database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
"iconClass": "icon-mariadb",
"tags": "database,mariadb",
- "template.openshift.io/long-description": "This template provides a standalone MariaDB server with a database created. The database is not stored on persistent storage, so any restart of the service will result in all data being lost. The database name, username, and password are chosen via parameters when provisioning this service.",
- "template.openshift.io/provider-display-name": "Red Hat, Inc.",
- "template.openshift.io/documentation-url": "https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md",
- "template.openshift.io/support-url": "https://access.redhat.com"
+ "openshift.io/long-description": "This template provides a standalone MariaDB server with a database created. The database is not stored on persistent storage, so any restart of the service will result in all data being lost. The database name, username, and password are chosen via parameters when provisioning this service.",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "openshift.io/documentation-url": "https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md",
+ "openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.",
diff --git a/roles/openshift_examples/files/examples/v3.7/db-templates/mariadb-persistent-template.json b/roles/openshift_examples/files/examples/v3.7/db-templates/mariadb-persistent-template.json
index 4378fa4a0..217ef11dd 100644
--- a/roles/openshift_examples/files/examples/v3.7/db-templates/mariadb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/db-templates/mariadb-persistent-template.json
@@ -8,10 +8,10 @@
"description": "MariaDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-mariadb",
"tags": "database,mariadb",
- "template.openshift.io/long-description": "This template provides a standalone MariaDB server with a database created. The database is stored on persistent storage. The database name, username, and password are chosen via parameters when provisioning this service.",
- "template.openshift.io/provider-display-name": "Red Hat, Inc.",
- "template.openshift.io/documentation-url": "https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md",
- "template.openshift.io/support-url": "https://access.redhat.com"
+ "openshift.io/long-description": "This template provides a standalone MariaDB server with a database created. The database is stored on persistent storage. The database name, username, and password are chosen via parameters when provisioning this service.",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "openshift.io/documentation-url": "https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md",
+ "openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/blob/master/10.1/README.md.",
diff --git a/roles/openshift_examples/files/examples/v3.7/db-templates/mongodb-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.7/db-templates/mongodb-ephemeral-template.json
index 7271a2c69..10f202c59 100644
--- a/roles/openshift_examples/files/examples/v3.7/db-templates/mongodb-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/db-templates/mongodb-ephemeral-template.json
@@ -8,10 +8,10 @@
"description": "MongoDB database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
"iconClass": "icon-mongodb",
"tags": "database,mongodb",
- "template.openshift.io/long-description": "This template provides a standalone MongoDB server with a database created. The database is not stored on persistent storage, so any restart of the service will result in all data being lost. The database name, username, and password are chosen via parameters when provisioning this service.",
- "template.openshift.io/provider-display-name": "Red Hat, Inc.",
- "template.openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/mongodb.html",
- "template.openshift.io/support-url": "https://access.redhat.com"
+ "openshift.io/long-description": "This template provides a standalone MongoDB server with a database created. The database is not stored on persistent storage, so any restart of the service will result in all data being lost. The database name, username, and password are chosen via parameters when provisioning this service.",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/mongodb.html",
+ "openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MONGODB_USER}\n Password: ${MONGODB_PASSWORD}\n Database Name: ${MONGODB_DATABASE}\n Connection URL: mongodb://${MONGODB_USER}:${MONGODB_PASSWORD}@${DATABASE_SERVICE_NAME}/${MONGODB_DATABASE}\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.",
diff --git a/roles/openshift_examples/files/examples/v3.7/db-templates/mongodb-persistent-template.json b/roles/openshift_examples/files/examples/v3.7/db-templates/mongodb-persistent-template.json
index d70d2263f..97e4128a4 100644
--- a/roles/openshift_examples/files/examples/v3.7/db-templates/mongodb-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/db-templates/mongodb-persistent-template.json
@@ -8,10 +8,10 @@
"description": "MongoDB database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-mongodb",
"tags": "database,mongodb",
- "template.openshift.io/long-description": "This template provides a standalone MongoDB server with a database created. The database is stored on persistent storage. The database name, username, and password are chosen via parameters when provisioning this service.",
- "template.openshift.io/provider-display-name": "Red Hat, Inc.",
- "template.openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/mongodb.html",
- "template.openshift.io/support-url": "https://access.redhat.com"
+ "openshift.io/long-description": "This template provides a standalone MongoDB server with a database created. The database is stored on persistent storage. The database name, username, and password are chosen via parameters when provisioning this service.",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/mongodb.html",
+ "openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MONGODB_USER}\n Password: ${MONGODB_PASSWORD}\n Database Name: ${MONGODB_DATABASE}\n Connection URL: mongodb://${MONGODB_USER}:${MONGODB_PASSWORD}@${DATABASE_SERVICE_NAME}/${MONGODB_DATABASE}\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/blob/master/3.2/README.md.",
diff --git a/roles/openshift_examples/files/examples/v3.7/db-templates/mysql-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.7/db-templates/mysql-ephemeral-template.json
index 54785993c..c0946416d 100644
--- a/roles/openshift_examples/files/examples/v3.7/db-templates/mysql-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/db-templates/mysql-ephemeral-template.json
@@ -8,10 +8,10 @@
"description": "MySQL database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
"iconClass": "icon-mysql-database",
"tags": "database,mysql",
- "template.openshift.io/long-description": "This template provides a standalone MySQL server with a database created. The database is not stored on persistent storage, so any restart of the service will result in all data being lost. The database name, username, and password are chosen via parameters when provisioning this service.",
- "template.openshift.io/provider-display-name": "Red Hat, Inc.",
- "template.openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/mysql.html",
- "template.openshift.io/support-url": "https://access.redhat.com"
+ "openshift.io/long-description": "This template provides a standalone MySQL server with a database created. The database is not stored on persistent storage, so any restart of the service will result in all data being lost. The database name, username, and password are chosen via parameters when provisioning this service.",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/mysql.html",
+ "openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.",
diff --git a/roles/openshift_examples/files/examples/v3.7/db-templates/mysql-persistent-template.json b/roles/openshift_examples/files/examples/v3.7/db-templates/mysql-persistent-template.json
index 2bd84b106..48ac114fd 100644
--- a/roles/openshift_examples/files/examples/v3.7/db-templates/mysql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/db-templates/mysql-persistent-template.json
@@ -8,10 +8,10 @@
"description": "MySQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-mysql-database",
"tags": "database,mysql",
- "template.openshift.io/long-description": "This template provides a standalone MySQL server with a database created. The database is stored on persistent storage. The database name, username, and password are chosen via parameters when provisioning this service.",
- "template.openshift.io/provider-display-name": "Red Hat, Inc.",
- "template.openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/mysql.html",
- "template.openshift.io/support-url": "https://access.redhat.com"
+ "openshift.io/long-description": "This template provides a standalone MySQL server with a database created. The database is stored on persistent storage. The database name, username, and password are chosen via parameters when provisioning this service.",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/mysql.html",
+ "openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${MYSQL_USER}\n Password: ${MYSQL_PASSWORD}\n Database Name: ${MYSQL_DATABASE}\n Connection URL: mysql://${DATABASE_SERVICE_NAME}:3306/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/mysql-container/blob/master/5.7/README.md.",
diff --git a/roles/openshift_examples/files/examples/v3.7/db-templates/postgresql-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.7/db-templates/postgresql-ephemeral-template.json
index 849c9d83f..7c419f1ae 100644
--- a/roles/openshift_examples/files/examples/v3.7/db-templates/postgresql-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/db-templates/postgresql-ephemeral-template.json
@@ -8,10 +8,10 @@
"description": "PostgreSQL database service, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
"iconClass": "icon-postgresql",
"tags": "database,postgresql",
- "template.openshift.io/long-description": "This template provides a standalone PostgreSQL server with a database created. The database is not stored on persistent storage, so any restart of the service will result in all data being lost. The database name, username, and password are chosen via parameters when provisioning this service.",
- "template.openshift.io/provider-display-name": "Red Hat, Inc.",
- "template.openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/postgresql.html",
- "template.openshift.io/support-url": "https://access.redhat.com"
+ "openshift.io/long-description": "This template provides a standalone PostgreSQL server with a database created. The database is not stored on persistent storage, so any restart of the service will result in all data being lost. The database name, username, and password are chosen via parameters when provisioning this service.",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/postgresql.html",
+ "openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${POSTGRESQL_USER}\n Password: ${POSTGRESQL_PASSWORD}\n Database Name: ${POSTGRESQL_DATABASE}\n Connection URL: postgresql://${DATABASE_SERVICE_NAME}:5432/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.",
diff --git a/roles/openshift_examples/files/examples/v3.7/db-templates/postgresql-persistent-template.json b/roles/openshift_examples/files/examples/v3.7/db-templates/postgresql-persistent-template.json
index b622baa01..8a2d23907 100644
--- a/roles/openshift_examples/files/examples/v3.7/db-templates/postgresql-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/db-templates/postgresql-persistent-template.json
@@ -8,10 +8,10 @@
"description": "PostgreSQL database service, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.\n\nNOTE: Scaling to more than one replica is not supported. You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-postgresql",
"tags": "database,postgresql",
- "template.openshift.io/long-description": "This template provides a standalone PostgreSQL server with a database created. The database is stored on persistent storage. The database name, username, and password are chosen via parameters when provisioning this service.",
- "template.openshift.io/provider-display-name": "Red Hat, Inc.",
- "template.openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/postgresql.html",
- "template.openshift.io/support-url": "https://access.redhat.com"
+ "openshift.io/long-description": "This template provides a standalone PostgreSQL server with a database created. The database is stored on persistent storage. The database name, username, and password are chosen via parameters when provisioning this service.",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/db_images/postgresql.html",
+ "openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Username: ${POSTGRESQL_USER}\n Password: ${POSTGRESQL_PASSWORD}\n Database Name: ${POSTGRESQL_DATABASE}\n Connection URL: postgresql://${DATABASE_SERVICE_NAME}:5432/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/blob/master/9.5.",
diff --git a/roles/openshift_examples/files/examples/v3.7/db-templates/redis-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.7/db-templates/redis-ephemeral-template.json
index 15bdd079b..ee60af9db 100644
--- a/roles/openshift_examples/files/examples/v3.7/db-templates/redis-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/db-templates/redis-ephemeral-template.json
@@ -8,10 +8,10 @@
"description": "Redis in-memory data structure store, without persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing",
"iconClass": "icon-redis",
"tags": "database,redis",
- "template.openshift.io/long-description": "This template provides a standalone Redis server. The data is not stored on persistent storage, so any restart of the service will result in all data being lost.",
- "template.openshift.io/provider-display-name": "Red Hat, Inc.",
- "template.openshift.io/documentation-url": "https://github.com/sclorg/redis-container/tree/master/3.2",
- "template.openshift.io/support-url": "https://access.redhat.com"
+ "openshift.io/long-description": "This template provides a standalone Redis server. The data is not stored on persistent storage, so any restart of the service will result in all data being lost.",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "openshift.io/documentation-url": "https://github.com/sclorg/redis-container/tree/master/3.2",
+ "openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Password: ${REDIS_PASSWORD}\n Connection URL: redis://${DATABASE_SERVICE_NAME}:6379/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.",
diff --git a/roles/openshift_examples/files/examples/v3.7/db-templates/redis-persistent-template.json b/roles/openshift_examples/files/examples/v3.7/db-templates/redis-persistent-template.json
index 1e31b02e0..e0e0a88d5 100644
--- a/roles/openshift_examples/files/examples/v3.7/db-templates/redis-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/db-templates/redis-persistent-template.json
@@ -8,10 +8,10 @@
"description": "Redis in-memory data structure store, with persistent storage. For more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-redis",
"tags": "database,redis",
- "template.openshift.io/long-description": "This template provides a standalone Redis server. The data is stored on persistent storage.",
- "template.openshift.io/provider-display-name": "Red Hat, Inc.",
- "template.openshift.io/documentation-url": "https://github.com/sclorg/redis-container/tree/master/3.2",
- "template.openshift.io/support-url": "https://access.redhat.com"
+ "openshift.io/long-description": "This template provides a standalone Redis server. The data is stored on persistent storage.",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "openshift.io/documentation-url": "https://github.com/sclorg/redis-container/tree/master/3.2",
+ "openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${DATABASE_SERVICE_NAME}.\n\n Password: ${REDIS_PASSWORD}\n Connection URL: redis://${DATABASE_SERVICE_NAME}:6379/\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/sclorg/redis-container/blob/master/3.2.",
diff --git a/roles/openshift_examples/files/examples/v3.7/image-streams/image-streams-centos7.json b/roles/openshift_examples/files/examples/v3.7/image-streams/image-streams-centos7.json
index 6cef21945..e7af160d9 100644
--- a/roles/openshift_examples/files/examples/v3.7/image-streams/image-streams-centos7.json
+++ b/roles/openshift_examples/files/examples/v3.7/image-streams/image-streams-centos7.json
@@ -9,7 +9,7 @@
"metadata": {
"name": "httpd",
"annotations": {
- "openshift.io/display-name": "Httpd"
+ "openshift.io/display-name": "Apache HTTP Server (httpd)"
}
},
"spec": {
@@ -17,8 +17,9 @@
{
"name": "latest",
"annotations": {
- "openshift.io/display-name": "Httpd (Latest)",
- "description": "Build and serve static content via Httpd on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/httpd-container/blob/master/2.4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Httpd available on OpenShift, including major versions updates.",
+ "openshift.io/display-name": "Apache HTTP Server (Latest)",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "description": "Build and serve static content via Apache HTTP Server (httpd) on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/httpd-container/blob/master/2.4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Httpd available on OpenShift, including major versions updates.",
"iconClass": "icon-apache",
"tags": "builder,httpd",
"supports":"httpd",
@@ -32,8 +33,9 @@
{
"name": "2.4",
"annotations": {
- "openshift.io/display-name": "Httpd 2.4",
- "description": "Build and serve static content via Httpd on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/httpd-container/blob/master/2.4/README.md.",
+ "openshift.io/display-name": "Apache HTTP Server 2.4",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "description": "Build and serve static content via Apache HTTP Server (httpd) 2.4 on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/httpd-container/blob/master/2.4/README.md.",
"iconClass": "icon-apache",
"tags": "builder,httpd",
"supports":"httpd",
@@ -63,6 +65,7 @@
"name": "latest",
"annotations": {
"openshift.io/display-name": "Ruby (Latest)",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run Ruby applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/tree/master/2.3/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Ruby available on OpenShift, including major versions updates.",
"iconClass": "icon-ruby",
"tags": "builder,ruby",
@@ -71,13 +74,14 @@
},
"from": {
"kind": "ImageStreamTag",
- "name": "2.3"
+ "name": "2.4"
}
},
{
"name": "2.0",
"annotations": {
"openshift.io/display-name": "Ruby 2.0",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run Ruby 2.0 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/tree/master/2.0/README.md.",
"iconClass": "icon-ruby",
"tags": "hidden,builder,ruby",
@@ -94,6 +98,7 @@
"name": "2.2",
"annotations": {
"openshift.io/display-name": "Ruby 2.2",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run Ruby 2.2 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/tree/master/2.2/README.md.",
"iconClass": "icon-ruby",
"tags": "builder,ruby",
@@ -110,6 +115,7 @@
"name": "2.3",
"annotations": {
"openshift.io/display-name": "Ruby 2.3",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run Ruby 2.3 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/blob/master/2.3/README.md.",
"iconClass": "icon-ruby",
"tags": "builder,ruby",
@@ -121,6 +127,23 @@
"kind": "DockerImage",
"name": "centos/ruby-23-centos7:latest"
}
+ },
+ {
+ "name": "2.4",
+ "annotations": {
+ "openshift.io/display-name": "Ruby 2.4",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "description": "Build and run Ruby 2.4 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/blob/master/2.4/README.md.",
+ "iconClass": "icon-ruby",
+ "tags": "builder,ruby",
+ "supports": "ruby:2.4,ruby",
+ "version": "2.4",
+ "sampleRepo": "https://github.com/openshift/ruby-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "centos/ruby-24-centos7:latest"
+ }
}
]
}
@@ -140,6 +163,7 @@
"name": "latest",
"annotations": {
"openshift.io/display-name": "Node.js (Latest)",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run Node.js applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Node.js available on OpenShift, including major versions updates.",
"iconClass": "icon-nodejs",
"tags": "builder,nodejs",
@@ -155,6 +179,7 @@
"name": "0.10",
"annotations": {
"openshift.io/display-name": "Node.js 0.10",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "DEPRECATED: Build and run Node.js 0.10 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/0.10/README.md.",
"iconClass": "icon-nodejs",
"tags": "hidden,nodejs",
@@ -171,6 +196,7 @@
"name": "4",
"annotations": {
"openshift.io/display-name": "Node.js 4",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run Node.js 4 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/4/README.md.",
"iconClass": "icon-nodejs",
"tags": "builder,nodejs",
@@ -187,6 +213,7 @@
"name": "6",
"annotations": {
"openshift.io/display-name": "Node.js 6",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run Node.js 6 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/6/README.md.",
"iconClass": "icon-nodejs",
"tags": "builder,nodejs",
@@ -217,6 +244,7 @@
"name": "latest",
"annotations": {
"openshift.io/display-name": "Perl (Latest)",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run Perl applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.20/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Perl available on OpenShift, including major versions updates.",
"iconClass": "icon-perl",
"tags": "builder,perl",
@@ -232,6 +260,7 @@
"name": "5.16",
"annotations": {
"openshift.io/display-name": "Perl 5.16",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run Perl 5.16 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.16/README.md.",
"iconClass": "icon-perl",
"tags": "hidden,builder,perl",
@@ -248,6 +277,7 @@
"name": "5.20",
"annotations": {
"openshift.io/display-name": "Perl 5.20",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run Perl 5.20 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.20/README.md.",
"iconClass": "icon-perl",
"tags": "builder,perl",
@@ -264,6 +294,7 @@
"name": "5.24",
"annotations": {
"openshift.io/display-name": "Perl 5.24",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run Perl 5.24 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.24/README.md.",
"iconClass": "icon-perl",
"tags": "builder,perl",
@@ -294,6 +325,7 @@
"name": "latest",
"annotations": {
"openshift.io/display-name": "PHP (Latest)",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run PHP applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PHP available on OpenShift, including major versions updates.",
"iconClass": "icon-php",
"tags": "builder,php",
@@ -309,6 +341,7 @@
"name": "5.5",
"annotations": {
"openshift.io/display-name": "PHP 5.5",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run PHP 5.5 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.5/README.md.",
"iconClass": "icon-php",
"tags": "hidden,builder,php",
@@ -325,6 +358,7 @@
"name": "5.6",
"annotations": {
"openshift.io/display-name": "PHP 5.6",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run PHP 5.6 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.6/README.md.",
"iconClass": "icon-php",
"tags": "builder,php",
@@ -341,6 +375,7 @@
"name": "7.0",
"annotations": {
"openshift.io/display-name": "PHP 7.0",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run PHP 7.0 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/7.0/README.md.",
"iconClass": "icon-php",
"tags": "builder,php",
@@ -371,6 +406,7 @@
"name": "latest",
"annotations": {
"openshift.io/display-name": "Python (Latest)",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run Python applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.",
"iconClass": "icon-python",
"tags": "builder,python",
@@ -386,6 +422,7 @@
"name": "3.3",
"annotations": {
"openshift.io/display-name": "Python 3.3",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run Python 3.3 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.3/README.md.",
"iconClass": "icon-python",
"tags": "hidden,builder,python",
@@ -402,6 +439,7 @@
"name": "2.7",
"annotations": {
"openshift.io/display-name": "Python 2.7",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run Python 2.7 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/2.7/README.md.",
"iconClass": "icon-python",
"tags": "builder,python",
@@ -418,6 +456,7 @@
"name": "3.4",
"annotations": {
"openshift.io/display-name": "Python 3.4",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run Python 3.4 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.4/README.md.",
"iconClass": "icon-python",
"tags": "builder,python",
@@ -434,6 +473,7 @@
"name": "3.5",
"annotations": {
"openshift.io/display-name": "Python 3.5",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run Python 3.5 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.",
"iconClass": "icon-python",
"tags": "builder,python",
@@ -464,6 +504,7 @@
"name": "latest",
"annotations": {
"openshift.io/display-name": "WildFly (Latest)",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run WildFly applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/openshift-s2i/s2i-wildfly/blob/master/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of WildFly available on OpenShift, including major versions updates.",
"iconClass": "icon-wildfly",
"tags": "builder,wildfly,java",
@@ -479,6 +520,7 @@
"name": "8.1",
"annotations": {
"openshift.io/display-name": "WildFly 8.1",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run WildFly 8.1 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/openshift-s2i/s2i-wildfly/blob/master/README.md.",
"iconClass": "icon-wildfly",
"tags": "builder,wildfly,java",
@@ -495,6 +537,7 @@
"name": "9.0",
"annotations": {
"openshift.io/display-name": "WildFly 9.0",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run WildFly 9.0 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/openshift-s2i/s2i-wildfly/blob/master/README.md.",
"iconClass": "icon-wildfly",
"tags": "builder,wildfly,java",
@@ -511,6 +554,7 @@
"name": "10.0",
"annotations": {
"openshift.io/display-name": "WildFly 10.0",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run WildFly 10.0 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/openshift-s2i/s2i-wildfly/blob/master/README.md.",
"iconClass": "icon-wildfly",
"tags": "builder,wildfly,java",
@@ -527,6 +571,7 @@
"name": "10.1",
"annotations": {
"openshift.io/display-name": "WildFly 10.1",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run WildFly 10.1 applications on CentOS 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/openshift-s2i/s2i-wildfly/blob/master/README.md.",
"iconClass": "icon-wildfly",
"tags": "builder,wildfly,java",
@@ -557,6 +602,7 @@
"name": "latest",
"annotations": {
"openshift.io/display-name": "MySQL (Latest)",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Provides a MySQL database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MySQL available on OpenShift, including major versions updates.",
"iconClass": "icon-mysql-database",
"tags": "mysql"
@@ -570,6 +616,7 @@
"name": "5.5",
"annotations": {
"openshift.io/display-name": "MySQL 5.5",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Provides a MySQL 5.5 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.5/README.md.",
"iconClass": "icon-mysql-database",
"tags": "hidden,mysql",
@@ -584,6 +631,7 @@
"name": "5.6",
"annotations": {
"openshift.io/display-name": "MySQL 5.6",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Provides a MySQL 5.6 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.6/README.md.",
"iconClass": "icon-mysql-database",
"tags": "mysql",
@@ -598,6 +646,7 @@
"name": "5.7",
"annotations": {
"openshift.io/display-name": "MySQL 5.7",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Provides a MySQL 5.7 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.7/README.md.",
"iconClass": "icon-mysql-database",
"tags": "mysql",
@@ -626,6 +675,7 @@
"name": "latest",
"annotations": {
"openshift.io/display-name": "MariaDB (Latest)",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Provides a MariaDB database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.1/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MariaDB available on OpenShift, including major versions updates.",
"iconClass": "icon-mariadb",
"tags": "mariadb"
@@ -639,6 +689,7 @@
"name": "10.1",
"annotations": {
"openshift.io/display-name": "MariaDB 10.1",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Provides a MariaDB 10.1 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.1/README.md.",
"iconClass": "icon-mariadb",
"tags": "mariadb",
@@ -667,6 +718,7 @@
"name": "latest",
"annotations": {
"openshift.io/display-name": "PostgreSQL (Latest)",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Provides a PostgreSQL database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.5.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PostgreSQL available on OpenShift, including major versions updates.",
"iconClass": "icon-postgresql",
"tags": "postgresql"
@@ -680,6 +732,7 @@
"name": "9.2",
"annotations": {
"openshift.io/display-name": "PostgreSQL 9.2",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Provides a PostgreSQL 9.2 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.2.",
"iconClass": "icon-postgresql",
"tags": "hidden,postgresql",
@@ -694,6 +747,7 @@
"name": "9.4",
"annotations": {
"openshift.io/display-name": "PostgreSQL 9.4",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Provides a PostgreSQL 9.4 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.4.",
"iconClass": "icon-postgresql",
"tags": "postgresql",
@@ -708,6 +762,7 @@
"name": "9.5",
"annotations": {
"openshift.io/display-name": "PostgreSQL 9.5",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Provides a PostgreSQL 9.5 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.5.",
"iconClass": "icon-postgresql",
"tags": "postgresql",
@@ -736,6 +791,7 @@
"name": "latest",
"annotations": {
"openshift.io/display-name": "MongoDB (Latest)",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Provides a MongoDB database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.2/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MongoDB available on OpenShift, including major versions updates.",
"iconClass": "icon-mongodb",
"tags": "mongodb"
@@ -749,6 +805,7 @@
"name": "2.4",
"annotations": {
"openshift.io/display-name": "MongoDB 2.4",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Provides a MongoDB 2.4 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/2.4/README.md.",
"iconClass": "icon-mongodb",
"tags": "hidden,mongodb",
@@ -763,6 +820,7 @@
"name": "2.6",
"annotations": {
"openshift.io/display-name": "MongoDB 2.6",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Provides a MongoDB 2.6 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/2.6/README.md.",
"iconClass": "icon-mongodb",
"tags": "mongodb",
@@ -777,6 +835,7 @@
"name": "3.2",
"annotations": {
"openshift.io/display-name": "MongoDB 3.2",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Provides a MongoDB 3.2 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.2/README.md.",
"iconClass": "icon-mongodb",
"tags": "mongodb",
@@ -805,6 +864,7 @@
"name": "latest",
"annotations": {
"openshift.io/display-name": "Redis (Latest)",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Provides a Redis database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/redis-container/tree/master/3.2/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Redis available on OpenShift, including major versions updates.",
"iconClass": "icon-redis",
"tags": "redis"
@@ -818,6 +878,7 @@
"name": "3.2",
"annotations": {
"openshift.io/display-name": "Redis 3.2",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Provides a Redis 3.2 database on CentOS 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/redis-container/tree/master/3.2/README.md.",
"iconClass": "icon-redis",
"tags": "redis",
@@ -846,6 +907,7 @@
"name": "latest",
"annotations": {
"openshift.io/display-name": "Jenkins (Latest)",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Provides a Jenkins server on CentOS 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Jenkins available on OpenShift, including major versions updates.",
"iconClass": "icon-jenkins",
"tags": "jenkins"
@@ -859,6 +921,7 @@
"name": "1",
"annotations": {
"openshift.io/display-name": "Jenkins 1.X",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Provides a Jenkins 1.X server on CentOS 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.",
"iconClass": "icon-jenkins",
"tags": "hidden,jenkins",
@@ -873,6 +936,7 @@
"name": "2",
"annotations": {
"openshift.io/display-name": "Jenkins 2.X",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Provides a Jenkins v2.x server on CentOS 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.",
"iconClass": "icon-jenkins",
"tags": "jenkins",
diff --git a/roles/openshift_examples/files/examples/v3.7/image-streams/image-streams-rhel7.json b/roles/openshift_examples/files/examples/v3.7/image-streams/image-streams-rhel7.json
index abdae01e3..2b082fc75 100644
--- a/roles/openshift_examples/files/examples/v3.7/image-streams/image-streams-rhel7.json
+++ b/roles/openshift_examples/files/examples/v3.7/image-streams/image-streams-rhel7.json
@@ -9,7 +9,7 @@
"metadata": {
"name": "httpd",
"annotations": {
- "openshift.io/display-name": "Httpd"
+ "openshift.io/display-name": "Apache HTTP Server (httpd)"
}
},
"spec": {
@@ -17,8 +17,9 @@
{
"name": "latest",
"annotations": {
- "openshift.io/display-name": "Httpd (Latest)",
- "description": "Build and serve static content via Httpd on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/httpd-container/blob/master/2.4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Httpd available on OpenShift, including major versions updates.",
+ "openshift.io/display-name": "Apache HTTP Server (Latest)",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "description": "Build and serve static content via Apache HTTP Server (httpd) on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/httpd-container/blob/master/2.4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Httpd available on OpenShift, including major versions updates.",
"iconClass": "icon-apache",
"tags": "builder,httpd",
"supports":"httpd",
@@ -32,8 +33,9 @@
{
"name": "2.4",
"annotations": {
- "openshift.io/display-name": "Httpd 2.4",
- "description": "Build and serve static content via Httpd on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/httpd-container/blob/master/2.4/README.md.",
+ "openshift.io/display-name": "Apache HTTP Server 2.4",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "description": "Build and serve static content via Apache HTTP Server (httpd) 2.4 on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/httpd-container/blob/master/2.4/README.md.",
"iconClass": "icon-apache",
"tags": "builder,httpd",
"supports":"httpd",
@@ -63,6 +65,7 @@
"name": "latest",
"annotations": {
"openshift.io/display-name": "Ruby (Latest)",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run Ruby applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/tree/master/2.3/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Ruby available on OpenShift, including major versions updates.",
"iconClass": "icon-ruby",
"tags": "builder,ruby",
@@ -71,13 +74,14 @@
},
"from": {
"kind": "ImageStreamTag",
- "name": "2.3"
+ "name": "2.4"
}
},
{
"name": "2.0",
"annotations": {
"openshift.io/display-name": "Ruby 2.0",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run Ruby 2.0 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/tree/master/2.0/README.md.",
"iconClass": "icon-ruby",
"tags": "hidden,builder,ruby",
@@ -94,6 +98,7 @@
"name": "2.2",
"annotations": {
"openshift.io/display-name": "Ruby 2.2",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run Ruby 2.2 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/tree/master/2.2/README.md.",
"iconClass": "icon-ruby",
"tags": "builder,ruby",
@@ -110,6 +115,7 @@
"name": "2.3",
"annotations": {
"openshift.io/display-name": "Ruby 2.3",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run Ruby 2.3 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/blob/master/2.3/README.md.",
"iconClass": "icon-ruby",
"tags": "builder,ruby",
@@ -121,6 +127,23 @@
"kind": "DockerImage",
"name": "registry.access.redhat.com/rhscl/ruby-23-rhel7:latest"
}
+ },
+ {
+ "name": "2.4",
+ "annotations": {
+ "openshift.io/display-name": "Ruby 2.4",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "description": "Build and run Ruby 2.4 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-ruby-container/blob/master/2.4/README.md.",
+ "iconClass": "icon-ruby",
+ "tags": "builder,ruby",
+ "supports": "ruby:2.4,ruby",
+ "version": "2.4",
+ "sampleRepo": "https://github.com/openshift/ruby-ex.git"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/rhscl/ruby-24-rhel7:latest"
+ }
}
]
}
@@ -140,6 +163,7 @@
"name": "latest",
"annotations": {
"openshift.io/display-name": "Node.js (Latest)",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run Node.js applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/4/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Node.js available on OpenShift, including major versions updates.",
"iconClass": "icon-nodejs",
"tags": "builder,nodejs",
@@ -155,6 +179,7 @@
"name": "0.10",
"annotations": {
"openshift.io/display-name": "Node.js 0.10",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "DEPRECATED: Build and run Node.js 0.10 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/0.10/README.md.",
"iconClass": "icon-nodejs",
"tags": "hidden,nodejs",
@@ -171,6 +196,7 @@
"name": "4",
"annotations": {
"openshift.io/display-name": "Node.js 4",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run Node.js 4 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container/blob/master/4/README.md.",
"iconClass": "icon-nodejs",
"tags": "builder,nodejs",
@@ -187,6 +213,7 @@
"name": "6",
"annotations": {
"openshift.io/display-name": "Node.js 6",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run Node.js 6 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-nodejs-container.",
"iconClass": "icon-nodejs",
"tags": "builder,nodejs",
@@ -217,6 +244,7 @@
"name": "latest",
"annotations": {
"openshift.io/display-name": "Perl (Latest)",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run Perl applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.20/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Perl available on OpenShift, including major versions updates.",
"iconClass": "icon-perl",
"tags": "builder,perl",
@@ -232,6 +260,7 @@
"name": "5.16",
"annotations": {
"openshift.io/display-name": "Perl 5.16",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run Perl 5.16 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.16/README.md.",
"iconClass": "icon-perl",
"tags": "hidden,builder,perl",
@@ -248,6 +277,7 @@
"name": "5.20",
"annotations": {
"openshift.io/display-name": "Perl 5.20",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run Perl 5.20 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.20/README.md.",
"iconClass": "icon-perl",
"tags": "builder,perl",
@@ -264,6 +294,7 @@
"name": "5.24",
"annotations": {
"openshift.io/display-name": "Perl 5.24",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run Perl 5.24 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-perl-container/blob/master/5.24/README.md.",
"iconClass": "icon-perl",
"tags": "builder,perl",
@@ -294,6 +325,7 @@
"name": "latest",
"annotations": {
"openshift.io/display-name": "PHP (Latest)",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run PHP applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PHP available on OpenShift, including major versions updates.",
"iconClass": "icon-php",
"tags": "builder,php",
@@ -309,6 +341,7 @@
"name": "5.5",
"annotations": {
"openshift.io/display-name": "PHP 5.5",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run PHP 5.5 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.5/README.md.",
"iconClass": "icon-php",
"tags": "hidden,builder,php",
@@ -325,6 +358,7 @@
"name": "5.6",
"annotations": {
"openshift.io/display-name": "PHP 5.6",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run PHP 5.6 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/5.6/README.md.",
"iconClass": "icon-php",
"tags": "builder,php",
@@ -341,6 +375,7 @@
"name": "7.0",
"annotations": {
"openshift.io/display-name": "PHP 7.0",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run PHP 7.0 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-php-container/blob/master/7.0/README.md.",
"iconClass": "icon-php",
"tags": "builder,php",
@@ -371,6 +406,7 @@
"name": "latest",
"annotations": {
"openshift.io/display-name": "Python (Latest)",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run Python applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Python available on OpenShift, including major versions updates.",
"iconClass": "icon-python",
"tags": "builder,python",
@@ -386,6 +422,7 @@
"name": "3.3",
"annotations": {
"openshift.io/display-name": "Python 3.3",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run Python 3.3 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.3/README.md.",
"iconClass": "icon-python",
"tags": "hidden,builder,python",
@@ -402,6 +439,7 @@
"name": "2.7",
"annotations": {
"openshift.io/display-name": "Python 2.7",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run Python 2.7 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/2.7/README.md.",
"iconClass": "icon-python",
"tags": "builder,python",
@@ -418,6 +456,7 @@
"name": "3.4",
"annotations": {
"openshift.io/display-name": "Python 3.4",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run Python 3.4 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.4/README.md.",
"iconClass": "icon-python",
"tags": "builder,python",
@@ -434,6 +473,7 @@
"name": "3.5",
"annotations": {
"openshift.io/display-name": "Python 3.5",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Build and run Python 3.5 applications on RHEL 7. For more information about using this builder image, including OpenShift considerations, see https://github.com/sclorg/s2i-python-container/blob/master/3.5/README.md.",
"iconClass": "icon-python",
"tags": "builder,python",
@@ -464,6 +504,7 @@
"name": "latest",
"annotations": {
"openshift.io/display-name": "MySQL (Latest)",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Provides a MySQL database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.6/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MySQL available on OpenShift, including major versions updates.",
"iconClass": "icon-mysql-database",
"tags": "mysql"
@@ -477,6 +518,7 @@
"name": "5.5",
"annotations": {
"openshift.io/display-name": "MySQL 5.5",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Provides a MySQL 5.5 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.5/README.md.",
"iconClass": "icon-mysql-database",
"tags": "hidden,mysql",
@@ -491,6 +533,7 @@
"name": "5.6",
"annotations": {
"openshift.io/display-name": "MySQL 5.6",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Provides a MySQL 5.6 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.6/README.md.",
"iconClass": "icon-mysql-database",
"tags": "mysql",
@@ -505,6 +548,7 @@
"name": "5.7",
"annotations": {
"openshift.io/display-name": "MySQL 5.7",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Provides a MySQL 5.7 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mysql-container/tree/master/5.7/README.md.",
"iconClass": "icon-mysql-database",
"tags": "mysql",
@@ -533,6 +577,7 @@
"name": "latest",
"annotations": {
"openshift.io/display-name": "MariaDB (Latest)",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Provides a MariaDB database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.1/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MariaDB available on OpenShift, including major versions updates.",
"iconClass": "icon-mariadb",
"tags": "mariadb"
@@ -546,6 +591,7 @@
"name": "10.1",
"annotations": {
"openshift.io/display-name": "MariaDB 10.1",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Provides a MariaDB 10.1 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mariadb-container/tree/master/10.1/README.md.",
"iconClass": "icon-mariadb",
"tags": "mariadb",
@@ -574,6 +620,7 @@
"name": "latest",
"annotations": {
"openshift.io/display-name": "PostgreSQL (Latest)",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Provides a PostgreSQL database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.5.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of PostgreSQL available on OpenShift, including major versions updates.",
"iconClass": "icon-postgresql",
"tags": "postgresql"
@@ -587,6 +634,7 @@
"name": "9.2",
"annotations": {
"openshift.io/display-name": "PostgreSQL 9.2",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Provides a PostgreSQL 9.2 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.2.",
"iconClass": "icon-postgresql",
"tags": "hidden,postgresql",
@@ -601,6 +649,7 @@
"name": "9.4",
"annotations": {
"openshift.io/display-name": "PostgreSQL 9.4",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Provides a PostgreSQL 9.4 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.4.",
"iconClass": "icon-postgresql",
"tags": "postgresql",
@@ -615,6 +664,7 @@
"name": "9.5",
"annotations": {
"openshift.io/display-name": "PostgreSQL 9.5",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Provides a PostgreSQL 9.5 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/postgresql-container/tree/master/9.5.",
"iconClass": "icon-postgresql",
"tags": "postgresql",
@@ -643,6 +693,7 @@
"name": "latest",
"annotations": {
"openshift.io/display-name": "MongoDB (Latest)",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Provides a MongoDB database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.2/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of MongoDB available on OpenShift, including major versions updates.",
"iconClass": "icon-mongodb",
"tags": "mongodb"
@@ -656,6 +707,7 @@
"name": "2.4",
"annotations": {
"openshift.io/display-name": "MongoDB 2.4",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Provides a MongoDB 2.4 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/2.4/README.md.",
"iconClass": "icon-mongodb",
"tags": "hidden,mongodb",
@@ -670,6 +722,7 @@
"name": "2.6",
"annotations": {
"openshift.io/display-name": "MongoDB 2.6",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Provides a MongoDB 2.6 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/2.6/README.md.",
"iconClass": "icon-mongodb",
"tags": "mongodb",
@@ -684,6 +737,7 @@
"name": "3.2",
"annotations": {
"openshift.io/display-name": "MongoDB 3.2",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Provides a MongoDB 3.2 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/mongodb-container/tree/master/3.2/README.md.",
"iconClass": "icon-mongodb",
"tags": "mongodb",
@@ -712,6 +766,7 @@
"name": "latest",
"annotations": {
"openshift.io/display-name": "Redis (Latest)",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Provides a Redis database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/redis-container/tree/master/3.2/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Redis available on OpenShift, including major versions updates.",
"iconClass": "icon-redis",
"tags": "redis"
@@ -725,6 +780,7 @@
"name": "3.2",
"annotations": {
"openshift.io/display-name": "Redis 3.2",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Provides a Redis 3.2 database on RHEL 7. For more information about using this database image, including OpenShift considerations, see https://github.com/sclorg/redis-container/tree/master/3.2/README.md.",
"iconClass": "icon-redis",
"tags": "redis",
@@ -753,6 +809,7 @@
"name": "latest",
"annotations": {
"openshift.io/display-name": "Jenkins (Latest)",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Provides a Jenkins server on RHEL 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.\n\nWARNING: By selecting this tag, your application will automatically update to use the latest version of Jenkins available on OpenShift, including major versions updates.",
"iconClass": "icon-jenkins",
"tags": "jenkins"
@@ -766,6 +823,7 @@
"name": "1",
"annotations": {
"openshift.io/display-name": "Jenkins 1.X",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Provides a Jenkins 1.X server on RHEL 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.",
"iconClass": "icon-jenkins",
"tags": "hidden,jenkins",
@@ -780,6 +838,7 @@
"name": "2",
"annotations": {
"openshift.io/display-name": "Jenkins 2.X",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
"description": "Provides a Jenkins 2.X server on RHEL 7. For more information about using this container image, including OpenShift considerations, see https://github.com/openshift/jenkins/blob/master/README.md.",
"iconClass": "icon-jenkins",
"tags": "jenkins",
diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/cakephp-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/cakephp-mysql-persistent.json
index 289f809fa..a8b90a493 100644
--- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/cakephp-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/cakephp-mysql-persistent.json
@@ -8,10 +8,10 @@
"description": "An example CakePHP application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/cakephp-ex/blob/master/README.md.",
"tags": "quickstart,php,cakephp",
"iconClass": "icon-php",
- "template.openshift.io/long-description": "This template defines resources needed to develop a CakePHP application, including a build configuration, application deployment configuration, and database deployment configuration.",
- "template.openshift.io/provider-display-name": "Red Hat, Inc.",
- "template.openshift.io/documentation-url": "https://github.com/openshift/cakephp-ex",
- "template.openshift.io/support-url": "https://access.redhat.com"
+ "openshift.io/long-description": "This template defines resources needed to develop a CakePHP application, including a build configuration, application deployment configuration, and database deployment configuration.",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "openshift.io/documentation-url": "https://github.com/openshift/cakephp-ex",
+ "openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/cake-ex/blob/master/README.md.",
@@ -219,7 +219,7 @@
"timeoutSeconds": 3,
"initialDelaySeconds": 30,
"httpGet": {
- "path": "/",
+ "path": "/health.php",
"port": 8080
}
},
@@ -342,7 +342,8 @@
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
"annotations": {
- "description": "Defines how to deploy the database"
+ "description": "Defines how to deploy the database",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/cakephp-mysql.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/cakephp-mysql.json
index 0562982b3..e84397394 100644
--- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/cakephp-mysql.json
+++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/cakephp-mysql.json
@@ -8,10 +8,10 @@
"description": "An example CakePHP application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/cakephp-ex/blob/master/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
"tags": "quickstart,php,cakephp",
"iconClass": "icon-php",
- "template.openshift.io/long-description": "This template defines resources needed to develop a CakePHP application, including a build configuration, application deployment configuration, and database deployment configuration. The database is stored in non-persistent storage, so this configuration should be used for experimental purposes only.",
- "template.openshift.io/provider-display-name": "Red Hat, Inc.",
- "template.openshift.io/documentation-url": "https://github.com/openshift/cakephp-ex",
- "template.openshift.io/support-url": "https://access.redhat.com"
+ "openshift.io/long-description": "This template defines resources needed to develop a CakePHP application, including a build configuration, application deployment configuration, and database deployment configuration. The database is stored in non-persistent storage, so this configuration should be used for experimental purposes only.",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "openshift.io/documentation-url": "https://github.com/openshift/cakephp-ex",
+ "openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/cake-ex/blob/master/README.md.",
@@ -219,7 +219,7 @@
"timeoutSeconds": 3,
"initialDelaySeconds": 30,
"httpGet": {
- "path": "/",
+ "path": "/health.php",
"port": 8080
}
},
@@ -325,7 +325,8 @@
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
"annotations": {
- "description": "Defines how to deploy the database"
+ "description": "Defines how to deploy the database",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dancer-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dancer-mysql-persistent.json
index 7a3875d09..96048f200 100644
--- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dancer-mysql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dancer-mysql-persistent.json
@@ -8,10 +8,10 @@
"description": "An example Dancer application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.",
"tags": "quickstart,perl,dancer",
"iconClass": "icon-perl",
- "template.openshift.io/long-description": "This template defines resources needed to develop a Dancer based application, including a build configuration, application deployment configuration, and database deployment configuration.",
- "template.openshift.io/provider-display-name": "Red Hat, Inc.",
- "template.openshift.io/documentation-url": "https://github.com/openshift/dancer-ex",
- "template.openshift.io/support-url": "https://access.redhat.com"
+ "openshift.io/long-description": "This template defines resources needed to develop a Dancer based application, including a build configuration, application deployment configuration, and database deployment configuration.",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "openshift.io/documentation-url": "https://github.com/openshift/dancer-ex",
+ "openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.",
@@ -206,7 +206,7 @@
"timeoutSeconds": 3,
"initialDelaySeconds": 30,
"httpGet": {
- "path": "/",
+ "path": "/health",
"port": 8080
}
},
@@ -307,7 +307,8 @@
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
"annotations": {
- "description": "Defines how to deploy the database"
+ "description": "Defines how to deploy the database",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dancer-mysql.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dancer-mysql.json
index 399ec72a8..1c87e05f3 100644
--- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dancer-mysql.json
+++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/dancer-mysql.json
@@ -8,10 +8,10 @@
"description": "An example Dancer application with a MySQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
"tags": "quickstart,perl,dancer",
"iconClass": "icon-perl",
- "template.openshift.io/long-description": "This template defines resources needed to develop a Dancer based application, including a build configuration, application deployment configuration, and database deployment configuration. The database is stored in non-persistent storage, so this configuration should be used for experimental purposes only.",
- "template.openshift.io/provider-display-name": "Red Hat, Inc.",
- "template.openshift.io/documentation-url": "https://github.com/openshift/dancer-ex",
- "template.openshift.io/support-url": "https://access.redhat.com"
+ "openshift.io/long-description": "This template defines resources needed to develop a Dancer based application, including a build configuration, application deployment configuration, and database deployment configuration. The database is stored in non-persistent storage, so this configuration should be used for experimental purposes only.",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "openshift.io/documentation-url": "https://github.com/openshift/dancer-ex",
+ "openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/dancer-ex/blob/master/README.md.",
@@ -206,7 +206,7 @@
"timeoutSeconds": 3,
"initialDelaySeconds": 30,
"httpGet": {
- "path": "/",
+ "path": "/health",
"port": 8080
}
},
@@ -290,7 +290,8 @@
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
"annotations": {
- "description": "Defines how to deploy the database"
+ "description": "Defines how to deploy the database",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/django-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/django-postgresql-persistent.json
index e37f7a492..060f45dac 100644
--- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/django-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/django-postgresql-persistent.json
@@ -8,10 +8,10 @@
"description": "An example Django application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.",
"tags": "quickstart,python,django",
"iconClass": "icon-python",
- "template.openshift.io/long-description": "This template defines resources needed to develop a Django based application, including a build configuration, application deployment configuration, and database deployment configuration.",
- "template.openshift.io/provider-display-name": "Red Hat, Inc.",
- "template.openshift.io/documentation-url": "https://github.com/openshift/django-ex",
- "template.openshift.io/support-url": "https://access.redhat.com"
+ "openshift.io/long-description": "This template defines resources needed to develop a Django based application, including a build configuration, application deployment configuration, and database deployment configuration.",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "openshift.io/documentation-url": "https://github.com/openshift/django-ex",
+ "openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.",
@@ -311,7 +311,8 @@
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
"annotations": {
- "description": "Defines how to deploy the database"
+ "description": "Defines how to deploy the database",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/django-postgresql.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/django-postgresql.json
index 965c2ebfe..66f2f0ca1 100644
--- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/django-postgresql.json
+++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/django-postgresql.json
@@ -8,10 +8,10 @@
"description": "An example Django application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
"tags": "quickstart,python,django",
"iconClass": "icon-python",
- "template.openshift.io/long-description": "This template defines resources needed to develop a Django based application, including a build configuration, application deployment configuration, and database deployment configuration. The database is stored in non-persistent storage, so this configuration should be used for experimental purposes only.",
- "template.openshift.io/provider-display-name": "Red Hat, Inc.",
- "template.openshift.io/documentation-url": "https://github.com/openshift/django-ex",
- "template.openshift.io/support-url": "https://access.redhat.com"
+ "openshift.io/long-description": "This template defines resources needed to develop a Django based application, including a build configuration, application deployment configuration, and database deployment configuration. The database is stored in non-persistent storage, so this configuration should be used for experimental purposes only.",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "openshift.io/documentation-url": "https://github.com/openshift/django-ex",
+ "openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/django-ex/blob/master/README.md.",
@@ -294,7 +294,8 @@
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
"annotations": {
- "description": "Defines how to deploy the database"
+ "description": "Defines how to deploy the database",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/httpd.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/httpd.json
index 6cf9d76eb..ebba9ee65 100644
--- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/httpd.json
+++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/httpd.json
@@ -4,14 +4,14 @@
"metadata": {
"name": "httpd-example",
"annotations": {
- "openshift.io/display-name": "Httpd",
- "description": "An example Httpd application that serves static content. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/httpd-ex/blob/master/README.md.",
+ "openshift.io/display-name": "Apache HTTP Server",
+ "description": "An example Apache HTTP Server (httpd) application that serves static content. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/httpd-ex/blob/master/README.md.",
"tags": "quickstart,httpd",
"iconClass": "icon-apache",
- "template.openshift.io/long-description": "This template defines resources needed to develop a static application served by httpd, including a build configuration and application deployment configuration.",
- "template.openshift.io/provider-display-name": "Red Hat, Inc.",
- "template.openshift.io/documentation-url": "https://github.com/openshift/httpd-ex",
- "template.openshift.io/support-url": "https://access.redhat.com"
+ "openshift.io/long-description": "This template defines resources needed to develop a static application served by Apache HTTP Server (httpd), including a build configuration and application deployment configuration.",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "openshift.io/documentation-url": "https://github.com/openshift/httpd-ex",
+ "openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/httpd-ex/blob/master/README.md.",
diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/jenkins-ephemeral-template.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/jenkins-ephemeral-template.json
index 62f43bc0b..28b4b9d81 100644
--- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/jenkins-ephemeral-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/jenkins-ephemeral-template.json
@@ -8,10 +8,10 @@
"description": "Jenkins service, without persistent storage.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
"iconClass": "icon-jenkins",
"tags": "instant-app,jenkins",
- "template.openshift.io/long-description": "This template deploys a Jenkins server capable of managing OpenShift Pipeline builds and supporting OpenShift-based oauth login. The Jenkins configuration is stored in non-persistent storage, so this configuration should be used for experimental purposes only.",
- "template.openshift.io/provider-display-name": "Red Hat, Inc.",
- "template.openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/other_images/jenkins.html",
- "template.openshift.io/support-url": "https://access.redhat.com"
+ "openshift.io/long-description": "This template deploys a Jenkins server capable of managing OpenShift Pipeline builds and supporting OpenShift-based oauth login. The Jenkins configuration is stored in non-persistent storage, so this configuration should be used for experimental purposes only.",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/other_images/jenkins.html",
+ "openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "A Jenkins service has been created in your project. Log into Jenkins with your OpenShift account. The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.",
@@ -112,10 +112,6 @@
"value": "true"
},
{
- "name": "OPENSHIFT_JENKINS_JVM_ARCH",
- "value": "${JVM_ARCH}"
- },
- {
"name": "KUBERNETES_MASTER",
"value": "https://kubernetes.default:443"
},
@@ -124,6 +120,10 @@
"value": "true"
},
{
+ "name": "JENKINS_SERVICE_NAME",
+ "value": "${JENKINS_SERVICE_NAME}"
+ },
+ {
"name": "JNLP_SERVICE_NAME",
"value": "${JNLP_SERVICE_NAME}"
}
@@ -260,12 +260,6 @@
"value": "true"
},
{
- "name": "JVM_ARCH",
- "displayName": "Jenkins JVM Architecture",
- "description": "Whether Jenkins runs with a 32 bit (i386) or 64 bit (x86_64) JVM.",
- "value": "i386"
- },
- {
"name": "MEMORY_LIMIT",
"displayName": "Memory Limit",
"description": "Maximum amount of memory the container can use.",
diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/jenkins-persistent-template.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/jenkins-persistent-template.json
index e9068e455..4915bb12c 100644
--- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/jenkins-persistent-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/jenkins-persistent-template.json
@@ -8,10 +8,10 @@
"description": "Jenkins service, with persistent storage.\n\nNOTE: You must have persistent volumes available in your cluster to use this template.",
"iconClass": "icon-jenkins",
"tags": "instant-app,jenkins",
- "template.openshift.io/long-description": "This template deploys a Jenkins server capable of managing OpenShift Pipeline builds and supporting OpenShift-based oauth login.",
- "template.openshift.io/provider-display-name": "Red Hat, Inc.",
- "template.openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/other_images/jenkins.html",
- "template.openshift.io/support-url": "https://access.redhat.com"
+ "openshift.io/long-description": "This template deploys a Jenkins server capable of managing OpenShift Pipeline builds and supporting OpenShift-based oauth login.",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "openshift.io/documentation-url": "https://docs.openshift.org/latest/using_images/other_images/jenkins.html",
+ "openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "A Jenkins service has been created in your project. Log into Jenkins with your OpenShift account. The tutorial at https://github.com/openshift/origin/blob/master/examples/jenkins/README.md contains more information about using this template.",
@@ -129,10 +129,6 @@
"value": "true"
},
{
- "name": "OPENSHIFT_JENKINS_JVM_ARCH",
- "value": "${JVM_ARCH}"
- },
- {
"name": "KUBERNETES_MASTER",
"value": "https://kubernetes.default:443"
},
@@ -141,6 +137,10 @@
"value": "true"
},
{
+ "name": "JENKINS_SERVICE_NAME",
+ "value": "${JENKINS_SERVICE_NAME}"
+ },
+ {
"name": "JNLP_SERVICE_NAME",
"value": "${JNLP_SERVICE_NAME}"
}
@@ -277,12 +277,6 @@
"value": "true"
},
{
- "name": "JVM_ARCH",
- "displayName": "Jenkins JVM Architecture",
- "description": "Whether Jenkins runs with a 32 bit (i386) or 64 bit (x86_64) JVM.",
- "value": "i386"
- },
- {
"name": "MEMORY_LIMIT",
"displayName": "Memory Limit",
"description": "Maximum amount of memory the container can use.",
diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/nodejs-mongodb-persistent.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/nodejs-mongodb-persistent.json
index df3704b9f..9543b5681 100644
--- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/nodejs-mongodb-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/nodejs-mongodb-persistent.json
@@ -8,10 +8,10 @@
"description": "An example Node.js application with a MongoDB database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.",
"tags": "quickstart,nodejs",
"iconClass": "icon-nodejs",
- "template.openshift.io/long-description": "This template defines resources needed to develop a NodeJS application, including a build configuration, application deployment configuration, and database deployment configuration.",
- "template.openshift.io/provider-display-name": "Red Hat, Inc.",
- "template.openshift.io/documentation-url": "https://github.com/openshift/nodejs-ex",
- "template.openshift.io/support-url": "https://access.redhat.com"
+ "openshift.io/long-description": "This template defines resources needed to develop a NodeJS application, including a build configuration, application deployment configuration, and database deployment configuration.",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "openshift.io/documentation-url": "https://github.com/openshift/nodejs-ex",
+ "openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.",
@@ -309,7 +309,8 @@
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
"annotations": {
- "description": "Defines how to deploy the database"
+ "description": "Defines how to deploy the database",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/nodejs-mongodb.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/nodejs-mongodb.json
index eb6ab33d9..0649d6204 100644
--- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/nodejs-mongodb.json
+++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/nodejs-mongodb.json
@@ -8,10 +8,10 @@
"description": "An example Node.js application with a MongoDB database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
"tags": "quickstart,nodejs",
"iconClass": "icon-nodejs",
- "template.openshift.io/long-description": "This template defines resources needed to develop a NodeJS application, including a build configuration, application deployment configuration, and database deployment configuration. The database is stored in non-persistent storage, so this configuration should be used for experimental purposes only.",
- "template.openshift.io/provider-display-name": "Red Hat, Inc.",
- "template.openshift.io/documentation-url": "https://github.com/openshift/nodejs-ex",
- "template.openshift.io/support-url": "https://access.redhat.com"
+ "openshift.io/long-description": "This template defines resources needed to develop a NodeJS application, including a build configuration, application deployment configuration, and database deployment configuration. The database is stored in non-persistent storage, so this configuration should be used for experimental purposes only.",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "openshift.io/documentation-url": "https://github.com/openshift/nodejs-ex",
+ "openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/nodejs-ex/blob/master/README.md.",
@@ -292,7 +292,8 @@
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
"annotations": {
- "description": "Defines how to deploy the database"
+ "description": "Defines how to deploy the database",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/rails-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/rails-postgresql-persistent.json
index 59e2e41ea..3810ef727 100644
--- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/rails-postgresql-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/rails-postgresql-persistent.json
@@ -8,10 +8,10 @@
"description": "An example Rails application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.",
"tags": "quickstart,ruby,rails",
"iconClass": "icon-ruby",
- "template.openshift.io/long-description": "This template defines resources needed to develop a Rails application, including a build configuration, application deployment configuration, and database deployment configuration.",
- "template.openshift.io/provider-display-name": "Red Hat, Inc.",
- "template.openshift.io/documentation-url": "https://github.com/openshift/rails-ex",
- "template.openshift.io/support-url": "https://access.redhat.com"
+ "openshift.io/long-description": "This template defines resources needed to develop a Rails application, including a build configuration, application deployment configuration, and database deployment configuration.",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "openshift.io/documentation-url": "https://github.com/openshift/rails-ex",
+ "openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.",
@@ -354,7 +354,8 @@
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
"annotations": {
- "description": "Defines how to deploy the database"
+ "description": "Defines how to deploy the database",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/rails-postgresql.json b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/rails-postgresql.json
index b3d080a91..3d8336c5a 100644
--- a/roles/openshift_examples/files/examples/v3.7/quickstart-templates/rails-postgresql.json
+++ b/roles/openshift_examples/files/examples/v3.7/quickstart-templates/rails-postgresql.json
@@ -8,10 +8,10 @@
"description": "An example Rails application with a PostgreSQL database. For more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.\n\nWARNING: Any data stored will be lost upon pod destruction. Only use this template for testing.",
"tags": "quickstart,ruby,rails",
"iconClass": "icon-ruby",
- "template.openshift.io/long-description": "This template defines resources needed to develop a Rails application, including a build configuration, application deployment configuration, and database deployment configuration. The database is stored in non-persistent storage, so this configuration should be used for experimental purposes only.",
- "template.openshift.io/provider-display-name": "Red Hat, Inc.",
- "template.openshift.io/documentation-url": "https://github.com/openshift/rails-ex",
- "template.openshift.io/support-url": "https://access.redhat.com"
+ "openshift.io/long-description": "This template defines resources needed to develop a Rails application, including a build configuration, application deployment configuration, and database deployment configuration. The database is stored in non-persistent storage, so this configuration should be used for experimental purposes only.",
+ "openshift.io/provider-display-name": "Red Hat, Inc.",
+ "openshift.io/documentation-url": "https://github.com/openshift/rails-ex",
+ "openshift.io/support-url": "https://access.redhat.com"
}
},
"message": "The following service(s) have been created in your project: ${NAME}, ${DATABASE_SERVICE_NAME}.\n\nFor more information about using this template, including OpenShift considerations, see https://github.com/openshift/rails-ex/blob/master/README.md.",
@@ -337,7 +337,8 @@
"metadata": {
"name": "${DATABASE_SERVICE_NAME}",
"annotations": {
- "description": "Defines how to deploy the database"
+ "description": "Defines how to deploy the database",
+ "template.alpha.openshift.io/wait-for-ready": "true"
}
},
"spec": {
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-streams/jboss-image-streams.json b/roles/openshift_examples/files/examples/v3.7/xpaas-streams/jboss-image-streams.json
index 0bb56452b..0aad7fae6 100644
--- a/roles/openshift_examples/files/examples/v3.7/xpaas-streams/jboss-image-streams.json
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-streams/jboss-image-streams.json
@@ -314,6 +314,38 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
+ "name": "jboss-eap71-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.1"
+ }
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-eap-7/eap71-openshift",
+ "tags": [
+ {
+ "name": "1.0-TP",
+ "annotations": {
+ "description": "JBoss EAP 7.1 Tech Preview.",
+ "iconClass": "icon-jboss",
+ "tags": "builder,eap,javaee,java,jboss,xpaas",
+ "supports":"eap:7.1,javaee:7,java:8,xpaas:1.0",
+ "sampleRepo": "https://github.com/jboss-developer/jboss-eap-quickstarts.git",
+ "sampleContextDir": "kitchensink",
+ "sampleRef": "7.0.0.GA",
+ "version": "1.0"
+ },
+ "from": {
+ "kind": "DockerImage",
+ "name": "registry.access.redhat.com/jboss-eap-7-tech-preview/eap71-openshift:1.0"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
"name": "jboss-decisionserver62-openshift",
"annotations": {
"openshift.io/display-name": "Red Hat JBoss BRMS 6.2 decision server"
@@ -524,6 +556,32 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
+ "name": "jboss-datagrid71-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 7.1"
+ }
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-datagrid-7/datagrid71-openshift",
+ "tags": [
+ {
+ "name": "1.0",
+ "annotations": {
+ "description": "JBoss Data Grid 7.1 S2I images.",
+ "iconClass": "icon-jboss",
+ "tags": "datagrid,jboss,xpaas",
+ "supports": "datagrid:7.1,xpaas:1.0",
+ "version": "1.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 7.1"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
"name": "jboss-datagrid65-client-openshift",
"annotations": {
"openshift.io/display-name": "Red Hat JBoss Data Grid 6.5 Client Modules for EAP"
@@ -549,6 +607,31 @@
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
+ "name": "jboss-datagrid71-client-openshift",
+ "annotations": {
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 7.1 Client Modules for EAP"
+ }
+ },
+ "spec": {
+ "dockerImageRepository": "registry.access.redhat.com/jboss-datagrid-7/datagrid71-client-openshift",
+ "tags": [
+ {
+ "name": "1.0",
+ "annotations": {
+ "description": "JBoss Data Grid 7.1 Client Modules for EAP.",
+ "iconClass": "icon-jboss",
+ "tags": "client,jboss,xpaas",
+ "version": "1.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 7.1 Client Modules for EAP"
+ }
+ }
+ ]
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
"name": "jboss-datavirt63-openshift",
"annotations": {
"openshift.io/display-name": "Red Hat JBoss Data Virtualization 6.3"
@@ -671,6 +754,16 @@
"supports":"amq:6.2,messaging,xpaas:1.4",
"version": "1.4"
}
+ },
+ {
+ "name": "1.5",
+ "annotations": {
+ "description": "JBoss A-MQ 6.2 broker image.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "supports":"amq:6.2,messaging,xpaas:1.5",
+ "version": "1.5"
+ }
}
]
}
@@ -697,6 +790,17 @@
"version": "1.0",
"openshift.io/display-name": "Red Hat JBoss A-MQ 6.3"
}
+ },
+ {
+ "name": "1.1",
+ "annotations": {
+ "description": "JBoss A-MQ 6.3 broker image.",
+ "iconClass": "icon-jboss",
+ "tags": "messaging,amq,jboss,xpaas",
+ "supports": "amq:6.3,messaging,xpaas:1.1",
+ "version": "1.1",
+ "openshift.io/display-name": "Red Hat JBoss A-MQ 6.3"
+ }
}
]
}
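The xpaas image-stream file picks up three new streams (JBoss EAP 7.1 Tech Preview, Data Grid 7.1, and the Data Grid 7.1 client modules) plus new `1.5`/`1.1` tags for the A-MQ 6.2 and 6.3 brokers. A sketch of loading and importing them, assuming rights on the shared `openshift` namespace and the updated file on disk:

```bash
# Replace the registered image streams with the updated definitions, then
# trigger an import of the new EAP 7.1 Tech Preview tag from the Red Hat
# registry configured in the stream's dockerImageRepository.
oc replace -n openshift -f jboss-image-streams.json
oc import-image jboss-eap71-openshift:1.0-TP -n openshift
```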
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq62-basic.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq62-basic.json
index af20b373a..d219ead67 100644
--- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq62-basic.json
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq62-basic.json
@@ -6,14 +6,14 @@
"description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone or in a mesh. This template doesn't feature SSL support.",
"iconClass": "icon-jboss",
"tags": "messaging,amq,jboss,xpaas",
- "version": "1.4.0",
+ "version": "1.5.0",
"openshift.io/display-name": "Red Hat JBoss A-MQ 6.2 (Ephemeral, no SSL)"
},
"name": "amq62-basic"
},
"labels": {
"template": "amq62-basic",
- "xpaas": "1.4.0"
+ "xpaas": "1.5.0"
},
"message": "A new messaging service has been created in your project. It will handle the protocol(s) \"${MQ_PROTOCOL}\". The username/password for accessing the service is ${MQ_USERNAME}/${MQ_PASSWORD}.",
"parameters": [
@@ -215,7 +215,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.4"
+ "name": "jboss-amq-62:1.5"
}
}
},
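All of the A-MQ templates below bump their `xpaas` label from `1.4.0` to `1.5.0` and repoint their image triggers at the new broker tags. Since the label identifies the template generation, it doubles as a selector for finding stale copies after an upgrade:

```bash
# Assumes the refreshed templates live in the "openshift" namespace.
oc get templates -n openshift -l xpaas=1.4.0   # leftovers from the old release
oc get templates -n openshift -l xpaas=1.5.0   # the newly loaded generation
```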
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq62-persistent-ssl.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq62-persistent-ssl.json
index 5acdbfabf..529a2a8ec 100644
--- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq62-persistent-ssl.json
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq62-persistent-ssl.json
@@ -3,17 +3,17 @@
"apiVersion": "v1",
"metadata": {
"annotations": {
- "description": "Application template for JBoss A-MQ brokers. These are deployed as standalone and use persistent storage for saving messages. This template supports SSL and requires usage of OpenShift secrets.",
+ "description": "Application template for JBoss A-MQ brokers. These are deployed as standalone and use persistent storage for saving messages, including message migration when the number of pods are reduced. This template supports SSL and requires usage of OpenShift secrets.",
"iconClass": "icon-jboss",
"tags": "messaging,amq,jboss,xpaas",
- "version": "1.4.0",
+ "version": "1.5.0",
"openshift.io/display-name": "Red Hat JBoss A-MQ 6.2 (Persistent with SSL)"
},
"name": "amq62-persistent-ssl"
},
"labels": {
"template": "amq62-persistent-ssl",
- "xpaas": "1.4.0"
+ "xpaas": "1.5.0"
},
"message": "A new persistent messaging service with SSL support has been created in your project. It will handle the protocol(s) \"${MQ_PROTOCOL}\". The username/password for accessing the service is ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"amq-service-account\" service account and a secret named \"${AMQ_SECRET}\" containing the trust store and key store files (\"${AMQ_TRUSTSTORE}\" and \"${AMQ_KEYSTORE}\") used for serving secure content.",
"parameters": [
@@ -26,9 +26,9 @@
},
{
"displayName": "Split Data?",
- "description": "Split the data directory for each node in a mesh.",
+ "description": "Split the data directory for each node in a mesh, this is now the default behaviour.",
"name": "AMQ_SPLIT",
- "value": "false",
+ "value": "true",
"required": false
},
{
@@ -360,7 +360,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.4"
+ "name": "jboss-amq-62:1.5"
}
}
},
@@ -546,6 +546,114 @@
}
},
{
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-drainer",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-drainer"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.5"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-drainer"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-drainer",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-drainer",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-drainer",
+ "image": "jboss-amq-62",
+ "command": [
+ "/opt/amq/bin/drain.sh"
+ ],
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "mountPath": "/opt/amq/data",
+ "name": "${APPLICATION_NAME}-amq-pvol"
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-amq-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-amq-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
"apiVersion": "v1",
"kind": "PersistentVolumeClaim",
"metadata": {
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq62-persistent.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq62-persistent.json
index b8089cd6d..b17d23c06 100644
--- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq62-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq62-persistent.json
@@ -3,17 +3,17 @@
"apiVersion": "v1",
"metadata": {
"annotations": {
- "description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone and use persistent storage for saving messages. This template doesn't feature SSL support.",
+ "description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone and use persistent storage for saving messages, including message migration when the number of pods are reduced. This template doesn't feature SSL support.",
"iconClass": "icon-jboss",
"tags": "messaging,amq,jboss,xpaas",
- "version": "1.4.0",
+ "version": "1.5.0",
"openshift.io/display-name": "Red Hat JBoss A-MQ 6.2 (Persistent, no SSL)"
},
"name": "amq62-persistent"
},
"labels": {
"template": "amq62-persistent",
- "xpaas": "1.4.0"
+ "xpaas": "1.5.0"
},
"message": "A new persistent messaging service has been created in your project. It will handle the protocol(s) \"${MQ_PROTOCOL}\". The username/password for accessing the service is ${MQ_USERNAME}/${MQ_PASSWORD}.",
"parameters": [
@@ -26,9 +26,9 @@
},
{
"displayName": "Split Data?",
- "description": "Split the data directory for each node in a mesh.",
+ "description": "Split the data directory for each node in a mesh, this is now the default behaviour.",
"name": "AMQ_SPLIT",
- "value": "false",
+ "value": "true",
"required": false
},
{
@@ -229,7 +229,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.4"
+ "name": "jboss-amq-62:1.5"
}
}
},
@@ -363,6 +363,114 @@
}
},
{
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-drainer",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-drainer"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.5"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-drainer"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-drainer",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-drainer",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-drainer",
+ "image": "jboss-amq-62",
+ "command": [
+ "/opt/amq/bin/drain.sh"
+ ],
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "mountPath": "/opt/amq/data",
+ "name": "${APPLICATION_NAME}-amq-pvol"
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-amq-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-amq-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
"apiVersion": "v1",
"kind": "PersistentVolumeClaim",
"metadata": {
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq62-ssl.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq62-ssl.json
index b52fdbfb0..a4a099e08 100644
--- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq62-ssl.json
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq62-ssl.json
@@ -6,14 +6,14 @@
"description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone or in a mesh. This template supports SSL and requires usage of OpenShift secrets.",
"iconClass": "icon-jboss",
"tags": "messaging,amq,jboss,xpaas",
- "version": "1.4.0",
+ "version": "1.5.0",
"openshift.io/display-name": "Red Hat JBoss A-MQ 6.2 (Ephemeral with SSL)"
},
"name": "amq62-ssl"
},
"labels": {
"template": "amq62-ssl",
- "xpaas": "1.4.0"
+ "xpaas": "1.5.0"
},
"message": "A new messaging service with SSL support has been created in your project. It will handle the protocol(s) \"${MQ_PROTOCOL}\". The username/password for accessing the service is ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"amq-service-account\" service account and a secret named \"${AMQ_SECRET}\" containing the trust store and key store files (\"${AMQ_TRUSTSTORE}\" and \"${AMQ_KEYSTORE}\") used for serving secure content.",
"parameters": [
@@ -346,7 +346,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-62:1.4"
+ "name": "jboss-amq-62:1.5"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq63-basic.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq63-basic.json
index d29f6a300..4655d3174 100644
--- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq63-basic.json
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq63-basic.json
@@ -6,14 +6,14 @@
"description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone or in a mesh. This template doesn't feature SSL support.",
"iconClass": "icon-jboss",
"tags": "messaging,amq,jboss,xpaas",
- "version": "1.0",
+ "version": "1.1",
"openshift.io/display-name": "Red Hat JBoss A-MQ 6.3 (Ephemeral, no SSL)"
},
"name": "amq63-basic"
},
"labels": {
"template": "amq63-basic",
- "xpaas": "1.4.0"
+ "xpaas": "1.5.0"
},
"message": "A new messaging service has been created in your project. It will handle the protocol(s) \"${MQ_PROTOCOL}\". The username/password for accessing the service is ${MQ_USERNAME}/${MQ_PASSWORD}.",
"parameters": [
@@ -215,7 +215,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-63:1.0"
+ "name": "jboss-amq-63:1.1"
}
}
},
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq63-persistent-ssl.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq63-persistent-ssl.json
index 47f6396dd..0e8b80592 100644
--- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq63-persistent-ssl.json
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq63-persistent-ssl.json
@@ -3,17 +3,17 @@
"apiVersion": "v1",
"metadata": {
"annotations": {
- "description": "Application template for JBoss A-MQ brokers. These are deployed as standalone and use persistent storage for saving messages. This template supports SSL and requires usage of OpenShift secrets.",
+ "description": "Application template for JBoss A-MQ brokers. These are deployed as standalone and use persistent storage for saving messages, including message migration when the number of pods are reduced. This template supports SSL and requires usage of OpenShift secrets.",
"iconClass": "icon-jboss",
"tags": "messaging,amq,jboss,xpaas",
- "version": "1.0",
+ "version": "1.1",
"openshift.io/display-name": "Red Hat JBoss A-MQ 6.3 (Persistent with SSL)"
},
"name": "amq63-persistent-ssl"
},
"labels": {
"template": "amq63-persistent-ssl",
- "xpaas": "1.4.0"
+ "xpaas": "1.5.0"
},
"message": "A new persistent messaging service with SSL support has been created in your project. It will handle the protocol(s) \"${MQ_PROTOCOL}\". The username/password for accessing the service is ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"amq-service-account\" service account and a secret named \"${AMQ_SECRET}\" containing the trust store and key store files (\"${AMQ_TRUSTSTORE}\" and \"${AMQ_KEYSTORE}\") used for serving secure content.",
"parameters": [
@@ -26,9 +26,9 @@
},
{
"displayName": "Split Data?",
- "description": "Split the data directory for each node in a mesh.",
+ "description": "Split the data directory for each node in a mesh, this is now the default behaviour.",
"name": "AMQ_SPLIT",
- "value": "false",
+ "value": "true",
"required": false
},
{
@@ -360,7 +360,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-63:1.0"
+ "name": "jboss-amq-63:1.1"
}
}
},
@@ -546,6 +546,114 @@
}
},
{
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-drainer",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-drainer"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-63:1.1"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-drainer"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-drainer",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-drainer",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-drainer",
+ "image": "jboss-amq-63",
+ "command": [
+ "/opt/amq/bin/drain.sh"
+ ],
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "mountPath": "/opt/amq/data",
+ "name": "${APPLICATION_NAME}-amq-pvol"
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-amq-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-amq-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
"apiVersion": "v1",
"kind": "PersistentVolumeClaim",
"metadata": {
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq63-persistent.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq63-persistent.json
index 4b64203c4..b94a8bbb8 100644
--- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq63-persistent.json
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq63-persistent.json
@@ -3,17 +3,17 @@
"apiVersion": "v1",
"metadata": {
"annotations": {
- "description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone and use persistent storage for saving messages. This template doesn't feature SSL support.",
+ "description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone and use persistent storage for saving messages, including message migration when the number of pods are reduced. This template doesn't feature SSL support.",
"iconClass": "icon-jboss",
"tags": "messaging,amq,jboss,xpaas",
- "version": "1.0",
+ "version": "1.1",
"openshift.io/display-name": "Red Hat JBoss A-MQ 6.3 (Persistent, no SSL)"
},
"name": "amq63-persistent"
},
"labels": {
"template": "amq63-persistent",
- "xpaas": "1.4.0"
+ "xpaas": "1.5.0"
},
"message": "A new persistent messaging service has been created in your project. It will handle the protocol(s) \"${MQ_PROTOCOL}\". The username/password for accessing the service is ${MQ_USERNAME}/${MQ_PASSWORD}.",
"parameters": [
@@ -26,9 +26,9 @@
},
{
"displayName": "Split Data?",
- "description": "Split the data directory for each node in a mesh.",
+ "description": "Split the data directory for each node in a mesh, this is now the default behaviour.",
"name": "AMQ_SPLIT",
- "value": "false",
+ "value": "true",
"required": false
},
{
@@ -229,7 +229,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-63:1.0"
+ "name": "jboss-amq-63:1.1"
}
}
},
@@ -363,6 +363,114 @@
}
},
{
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-drainer",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-drainer"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-63:1.1"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-drainer"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-drainer",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-drainer",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-drainer",
+ "image": "jboss-amq-63",
+ "command": [
+ "/opt/amq/bin/drain.sh"
+ ],
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "mountPath": "/opt/amq/data",
+ "name": "${APPLICATION_NAME}-amq-pvol"
+ }
+ ],
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-amq-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-amq-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
"apiVersion": "v1",
"kind": "PersistentVolumeClaim",
"metadata": {
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq63-ssl.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq63-ssl.json
index 20ad50016..f2de718c5 100644
--- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq63-ssl.json
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/amq63-ssl.json
@@ -6,14 +6,14 @@
"description": "Application template for JBoss A-MQ brokers. These can be deployed as standalone or in a mesh. This template supports SSL and requires usage of OpenShift secrets.",
"iconClass": "icon-jboss",
"tags": "messaging,amq,jboss,xpaas",
- "version": "1.0",
+ "version": "1.1",
"openshift.io/display-name": "Red Hat JBoss A-MQ 6.3 (Ephemeral with SSL)"
},
"name": "amq63-ssl"
},
"labels": {
"template": "amq63-ssl",
- "xpaas": "1.4.0"
+ "xpaas": "1.5.0"
},
"message": "A new messaging service with SSL support has been created in your project. It will handle the protocol(s) \"${MQ_PROTOCOL}\". The username/password for accessing the service is ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"amq-service-account\" service account and a secret named \"${AMQ_SECRET}\" containing the trust store and key store files (\"${AMQ_TRUSTSTORE}\" and \"${AMQ_KEYSTORE}\") used for serving secure content.",
"parameters": [
@@ -346,7 +346,7 @@
"from": {
"kind": "ImageStreamTag",
"namespace": "${IMAGE_STREAM_NAMESPACE}",
- "name": "jboss-amq-63:1.0"
+ "name": "jboss-amq-63:1.1"
}
}
},
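
Both SSL variants expect the "amq-service-account" service account plus a secret carrying the keystore and truststore named by AMQ_KEYSTORE and AMQ_TRUSTSTORE. A minimal sketch that packages existing store files into a Secret manifest for oc create -f - (all names below are placeholders; the store files must already exist):

    import base64, json

    files = {"broker.ks": "broker.ks", "broker.ts": "broker.ts"}  # secret key -> local path

    secret = {
        "apiVersion": "v1",
        "kind": "Secret",
        "metadata": {"name": "amq-app-secret"},  # must match the AMQ_SECRET parameter
        "data": {
            key: base64.b64encode(open(path, "rb").read()).decode("ascii")
            for key, path in files.items()
        },
    }

    print(json.dumps(secret, indent=2))  # pipe into: oc create -f -
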
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-basic.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-basic.json
new file mode 100644
index 000000000..200f232ea
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-basic.json
@@ -0,0 +1,372 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for JDG 7.1 applications.",
+ "tags": "datagrid,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 7.1 (Ephemeral, no https)"
+ },
+ "name": "datagrid71-basic"
+ },
+ "labels": {
+ "template": "datagrid71-basic",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new data grid service has been created in your project. It supports connector type(s) \"${INFINISPAN_CONNECTORS}\".",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "datagrid-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Username",
+ "description": "User name for JDG user.",
+ "name": "USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Password",
+ "description": "The password to access the JDG Caches. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s). (optional)",
+ "name": "PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Infinispan Connectors",
+ "description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')",
+ "name": "INFINISPAN_CONNECTORS",
+ "value": "hotrod,memcached,rest",
+ "required": false
+ },
+ {
+ "displayName": "Cache Names",
+ "description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configured for each entry.",
+ "name": "CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datavirt Cache Names",
+ "description": "Comma-separated list of caches to configure for use by Red Hat JBoss Data Virtualization for materialization of views. Three caches will be created for each named cache: <name>, <name>_staging and <name>_alias.",
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Default Cache Type",
+ "description": "Default cache type for all caches. If empty then distributed will be the default",
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Encryption Requires SSL Client Authentication?",
+ "description": "",
+ "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Memcached Cache Name",
+ "description": "The name of the cache to expose through this memcached connector (defaults to 'default')",
+ "name": "MEMCACHED_CACHE",
+ "value": "default",
+ "required": false
+ },
+ {
+ "displayName": "REST Security Domain",
+ "description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint",
+ "name": "REST_SECURITY_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Cluster Password",
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 11211,
+ "targetPort": 11211
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-memcached",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Memcached service for clustered applications."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 11333,
+ "targetPort": 11333
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-hotrod",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Hot Rod service for clustered applications."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-datagrid71-openshift:1.0"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "jboss-datagrid71-openshift",
+ "imagePullPolicy": "Always",
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/datagrid/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/datagrid/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ },
+ {
+ "name": "memcached",
+ "containerPort": 11211,
+ "protocol": "TCP"
+ },
+ {
+ "name": "hotrod-internal",
+ "containerPort": 11222,
+ "protocol": "TCP"
+ },
+ {
+ "name": "hotrod",
+ "containerPort": 11333,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "USERNAME",
+ "value": "${USERNAME}"
+ },
+ {
+ "name": "PASSWORD",
+ "value": "${PASSWORD}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "INFINISPAN_CONNECTORS",
+ "value": "${INFINISPAN_CONNECTORS}"
+ },
+ {
+ "name": "CACHE_NAMES",
+ "value": "${CACHE_NAMES}"
+ },
+ {
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "${DATAVIRT_CACHE_NAMES}"
+ },
+ {
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "${CACHE_TYPE_DEFAULT}"
+ },
+ {
+ "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
+ "value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}"
+ },
+ {
+ "name": "HOTROD_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-hotrod"
+ },
+ {
+ "name": "MEMCACHED_CACHE",
+ "value": "${MEMCACHED_CACHE}"
+ },
+ {
+ "name": "REST_SECURITY_DOMAIN",
+ "value": "${REST_SECURITY_DOMAIN}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
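
The new datagrid71-basic template is consumed with oc process, which replaces each ${NAME} reference with a supplied parameter or the parameter's declared default value. A rough, non-authoritative stand-in for that substitution step (it skips "generate: expression" handling and "required" checks):

    import json

    def process(template, overrides=None):
        # Defaults from the template's parameter list, then caller overrides.
        params = {p["name"]: p.get("value", "") for p in template.get("parameters", [])}
        params.update(overrides or {})
        text = json.dumps(template["objects"])
        for name, value in params.items():
            text = text.replace("${%s}" % name, value)
        return json.loads(text)

    with open("datagrid71-basic.json") as f:
        objects = process(json.load(f), {"APPLICATION_NAME": "mycache"})
    print(objects[0]["metadata"]["name"])  # -> mycache
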
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-https.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-https.json
new file mode 100644
index 000000000..b2c385608
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-https.json
@@ -0,0 +1,550 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for JDG 7.1 applications.",
+ "tags": "datagrid,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 7.1 (Ephemeral with https)"
+ },
+ "name": "datagrid71-https"
+ },
+ "labels": {
+ "template": "datagrid71-https",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new data grid service has been created in your project. It supports connector type(s) \"${INFINISPAN_CONNECTORS}\". Please be sure to create the \"datagrid-service-account\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "datagrid-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Username",
+ "description": "User name for JDG user.",
+ "name": "USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Password",
+ "description": "The password to access the JDG Caches. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s). (optional)",
+ "name": "PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "datagrid-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Infinispan Connectors",
+ "description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')",
+ "name": "INFINISPAN_CONNECTORS",
+ "value": "hotrod,memcached,rest",
+ "required": false
+ },
+ {
+ "displayName": "Cache Names",
+ "description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configured for each entry.",
+ "name": "CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datavirt Cache Names",
+ "description": "Comma-separated list of caches to configure for use by Red Hat JBoss Data Virtualization for materialization of views. Three caches will be created for each named cache: <name>, <name>_staging and <name>_alias.",
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Default Cache Type",
+ "description": "Default cache type for all caches. If empty then distributed will be the default",
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Encryption Requires SSL Client Authentication?",
+ "description": "",
+ "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Memcached Cache Name",
+ "description": "The name of the cache to expose through this memcached connector (defaults to 'default')",
+ "name": "MEMCACHED_CACHE",
+ "value": "default",
+ "required": false
+ },
+ {
+ "displayName": "REST Security Domain",
+ "description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint",
+ "name": "REST_SECURITY_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "datagrid-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Cluster Password",
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTP port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTPS port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 11211,
+ "targetPort": 11211
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-memcached",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Memcached service for clustered applications."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 11333,
+ "targetPort": 11333
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-hotrod",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Hot Rod service for clustered applications."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTPS service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-datagrid71-openshift:1.0"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "datagrid-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "jboss-datagrid71-openshift",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "datagrid-keystore-volume",
+ "mountPath": "/etc/datagrid-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "datagrid-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/datagrid/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/datagrid/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ },
+ {
+ "name": "memcached",
+ "containerPort": 11211,
+ "protocol": "TCP"
+ },
+ {
+ "name": "hotrod-internal",
+ "containerPort": 11222,
+ "protocol": "TCP"
+ },
+ {
+ "name": "hotrod",
+ "containerPort": 11333,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "USERNAME",
+ "value": "${USERNAME}"
+ },
+ {
+ "name": "PASSWORD",
+ "value": "${PASSWORD}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/datagrid-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "INFINISPAN_CONNECTORS",
+ "value": "${INFINISPAN_CONNECTORS}"
+ },
+ {
+ "name": "CACHE_NAMES",
+ "value": "${CACHE_NAMES}"
+ },
+ {
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "${DATAVIRT_CACHE_NAMES}"
+ },
+ {
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "${CACHE_TYPE_DEFAULT}"
+ },
+ {
+ "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
+ "value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}"
+ },
+ {
+ "name": "HOTROD_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-hotrod"
+ },
+ {
+ "name": "MEMCACHED_CACHE",
+ "value": "${MEMCACHED_CACHE}"
+ },
+ {
+ "name": "REST_SECURITY_DOMAIN",
+ "value": "${REST_SECURITY_DOMAIN}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "datagrid-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "datagrid-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
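
Once datagrid71-https is instantiated with the memcached connector enabled, the ${APPLICATION_NAME}-memcached service speaks the plain memcached text protocol on port 11211. A small in-cluster smoke test (the service host is a placeholder, and this assumes the connector is left unauthenticated):

    import socket

    s = socket.create_connection(("datagrid-app-memcached", 11211))  # placeholder host

    # Text protocol: set <key> <flags> <exptime> <bytes>\r\n<data>\r\n
    s.sendall(b"set greeting 0 0 5\r\nhello\r\n")
    print(s.recv(1024))  # b'STORED\r\n'

    s.sendall(b"get greeting\r\n")
    print(s.recv(1024))  # b'VALUE greeting 0 5\r\nhello\r\nEND\r\n'
    s.close()
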
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-mysql-persistent.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-mysql-persistent.json
new file mode 100644
index 000000000..8512ead55
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-mysql-persistent.json
@@ -0,0 +1,852 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for JDG 7.1 and MySQL applications with persistent storage.",
+ "tags": "datagrid,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 7.1 + MySQL (Persistent with https)"
+ },
+ "name": "datagrid71-mysql-persistent"
+ },
+ "labels": {
+ "template": "datagrid71-mysql-persistent",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new data grid service (using MySQL with persistent storage) has been created in your project. It supports connector type(s) \"${INFINISPAN_CONNECTORS}\". The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"datagrid-service-account\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "datagrid-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Username",
+ "description": "User name for JDG user.",
+ "name": "USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Password",
+ "description": "The password to access the JDG Caches. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s). (optional)",
+ "name": "PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "datagrid-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
+ "name": "DB_JNDI",
+ "value": "java:/jboss/datasources/mysql",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Lower Case Table Names",
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Maximum number of connections",
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Minimum Word Length",
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Maximum Word Length",
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL AIO",
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "displayName": "Database Volume Capacity",
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Infinispan Connectors",
+ "description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')",
+ "name": "INFINISPAN_CONNECTORS",
+ "value": "hotrod,memcached,rest",
+ "required": false
+ },
+ {
+ "displayName": "Cache Names",
+ "description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configured for each entry.",
+ "name": "CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datavirt Cache Names",
+ "description": "Comma-separated list of caches to configure for use by Red Hat JBoss Data Virtualization for materialization of views. Three caches will be created for each named cache: <name>, <name>_staging and <name>_alias.",
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Default Cache Type",
+ "description": "Default cache type for all caches. If empty then distributed will be the default",
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Encryption Requires SSL Client Authentication?",
+ "description": "",
+ "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Memcached Cache Name",
+ "description": "The name of the cache to expose through this memcached connector (defaults to 'default')",
+ "name": "MEMCACHED_CACHE",
+ "value": "default",
+ "required": false
+ },
+ {
+ "displayName": "REST Security Domain",
+ "description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint",
+ "name": "REST_SECURITY_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "datagrid-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Cluster Password",
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTP port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTPS port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 11211,
+ "targetPort": 11211
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-memcached",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Memcached service for clustered applications.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 11333,
+ "targetPort": 11333
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-hotrod",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Hot Rod service for clustered applications.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTPS service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-datagrid71-openshift:1.0"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "datagrid-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "jboss-datagrid71-openshift",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "datagrid-keystore-volume",
+ "mountPath": "/etc/datagrid-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "datagrid-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/datagrid/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/datagrid/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ },
+ {
+ "name": "memcached",
+ "containerPort": 11211,
+ "protocol": "TCP"
+ },
+ {
+ "name": "hotrod-internal",
+ "containerPort": 11222,
+ "protocol": "TCP"
+ },
+ {
+ "name": "hotrod",
+ "containerPort": 11333,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "USERNAME",
+ "value": "${USERNAME}"
+ },
+ {
+ "name": "PASSWORD",
+ "value": "${PASSWORD}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/datagrid-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "DEFAULT_JDBC_STORE_TYPE",
+ "value": "string"
+ },
+ {
+ "name": "DEFAULT_JDBC_STORE_DATASOURCE",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "MEMCACHED_JDBC_STORE_TYPE",
+ "value": "string"
+ },
+ {
+ "name": "MEMCACHED_JDBC_STORE_DATASOURCE",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "INFINISPAN_CONNECTORS",
+ "value": "${INFINISPAN_CONNECTORS}"
+ },
+ {
+ "name": "CACHE_NAMES",
+ "value": "${CACHE_NAMES}"
+ },
+ {
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "${DATAVIRT_CACHE_NAMES}"
+ },
+ {
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "${CACHE_TYPE_DEFAULT}"
+ },
+ {
+ "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
+ "value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}"
+ },
+ {
+ "name": "HOTROD_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-hotrod"
+ },
+ {
+ "name": "MEMCACHED_CACHE",
+ "value": "${MEMCACHED_CACHE}"
+ },
+ {
+ "name": "REST_SECURITY_DOMAIN",
+ "value": "${REST_SECURITY_DOMAIN}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "datagrid-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "datagrid-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mysql/data",
+ "name": "${APPLICATION_NAME}-mysql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mysql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mysql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
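
Several parameters in datagrid71-mysql-persistent (DB_USERNAME, DB_PASSWORD, JGROUPS_CLUSTER_PASSWORD) use "generate": "expression", so oc process fills them with random strings matching the "from" pattern. A tiny stand-in that handles only the [a-zA-Z0-9]{n} shapes used in these templates:

    import random, re, string

    ALPHANUM = string.ascii_letters + string.digits

    def from_expression(expr):
        # Expand each [a-zA-Z0-9]{n} group; literal text passes through.
        return re.sub(
            r"\[a-zA-Z0-9\]\{(\d+)\}",
            lambda m: "".join(random.choice(ALPHANUM) for _ in range(int(m.group(1)))),
            expr,
        )

    print(from_expression("user[a-zA-Z0-9]{3}"))  # e.g. userQ4x
    print(from_expression("[a-zA-Z0-9]{8}"))      # e.g. p0q7RT2c
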
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-mysql.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-mysql.json
new file mode 100644
index 000000000..9ac76cecc
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-mysql.json
@@ -0,0 +1,811 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for JDG 7.1 and MySQL applications.",
+ "tags": "datagrid,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 7.1 + MySQL (Ephemeral with https)"
+ },
+ "name": "datagrid71-mysql"
+ },
+ "labels": {
+ "template": "datagrid71-mysql",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new data grid service (using MySQL) has been created in your project. It supports connector type(s) \"${INFINISPAN_CONNECTORS}\". The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"datagrid-service-account\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "datagrid-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Username",
+ "description": "User name for JDG user.",
+ "name": "USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Password",
+ "description": "The password to access the JDG Caches. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s). (optional)",
+ "name": "PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "datagrid-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
+ "name": "DB_JNDI",
+ "value": "java:/jboss/datasources/mysql",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Lower Case Table Names",
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Maximum number of connections",
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Minimum Word Length",
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Maximum Word Length",
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL AIO",
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Infinispan Connectors",
+ "description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')",
+ "name": "INFINISPAN_CONNECTORS",
+ "value": "hotrod,memcached,rest",
+ "required": false
+ },
+ {
+ "displayName": "Cache Names",
+ "description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configured for each entry.",
+ "name": "CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datavirt Cache Names",
+ "description": "Comma-separated list of caches to configure for use by Red Hat JBoss Data Virtualization for materialization of views. Three caches will be created for each named cache: <name>, <name>_staging and <name>_alias.",
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Default Cache Type",
+ "description": "Default cache type for all caches. If empty then distributed will be the default",
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Encryption Requires SSL Client Authentication?",
+ "description": "",
+ "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Memcached Cache Name",
+ "description": "The name of the cache to expose through this memcached connector (defaults to 'default')",
+ "name": "MEMCACHED_CACHE",
+ "value": "default",
+ "required": false
+ },
+ {
+ "displayName": "REST Security Domain",
+ "description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint",
+ "name": "REST_SECURITY_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "datagrid-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Cluster Password",
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTP port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTPS port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 11211,
+ "targetPort": 11211
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-memcached",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Memcached service for clustered applications.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 11333,
+ "targetPort": 11333
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-hotrod",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Hot Rod service for clustered applications.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTPS service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-datagrid71-openshift:1.0"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "datagrid-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "jboss-datagrid71-openshift",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "datagrid-keystore-volume",
+ "mountPath": "/etc/datagrid-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "datagrid-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/datagrid/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/datagrid/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ },
+ {
+ "name": "memcached",
+ "containerPort": 11211,
+ "protocol": "TCP"
+ },
+ {
+ "name": "hotrod-internal",
+ "containerPort": 11222,
+ "protocol": "TCP"
+ },
+ {
+ "name": "hotrod",
+ "containerPort": 11333,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "USERNAME",
+ "value": "${USERNAME}"
+ },
+ {
+ "name": "PASSWORD",
+ "value": "${PASSWORD}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/datagrid-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "DEFAULT_JDBC_STORE_TYPE",
+ "value": "string"
+ },
+ {
+ "name": "DEFAULT_JDBC_STORE_DATASOURCE",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "MEMCACHED_JDBC_STORE_TYPE",
+ "value": "string"
+ },
+ {
+ "name": "MEMCACHED_JDBC_STORE_DATASOURCE",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "INFINISPAN_CONNECTORS",
+ "value": "${INFINISPAN_CONNECTORS}"
+ },
+ {
+ "name": "CACHE_NAMES",
+ "value": "${CACHE_NAMES}"
+ },
+ {
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "${DATAVIRT_CACHE_NAMES}"
+ },
+ {
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "${CACHE_TYPE_DEFAULT}"
+ },
+ {
+ "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
+ "value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}"
+ },
+ {
+ "name": "HOTROD_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-hotrod"
+ },
+ {
+ "name": "MEMCACHED_CACHE",
+ "value": "${MEMCACHED_CACHE}"
+ },
+ {
+ "name": "REST_SECURITY_DOMAIN",
+ "value": "${REST_SECURITY_DOMAIN}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "datagrid-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "datagrid-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
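For reference, a minimal sketch of instantiating the JDG 7.1 + MySQL template above with the oc client. The local filename and parameter values here are assumptions; the service account name comes from the template's deployment config, and the keystore secrets named in the template message must already exist:

    # Service account referenced by serviceAccountName in the deployment config.
    oc create serviceaccount datagrid-service-account

    # Process the template with a couple of parameter overrides and create
    # the resulting objects in the current project.
    oc process -f datagrid71-mysql.json \
        -p APPLICATION_NAME=datagrid-app \
        -p MYSQL_IMAGE_STREAM_TAG=5.7 \
      | oc create -f -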
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-postgresql-persistent.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-postgresql-persistent.json
new file mode 100644
index 000000000..8f7180ff6
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-postgresql-persistent.json
@@ -0,0 +1,824 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for JDG 7.1 and PostgreSQL applications with persistent storage.",
+ "tags": "datagrid,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 7.1 + PostgreSQL (Persistent with https)"
+ },
+ "name": "datagrid71-postgresql-persistent"
+ },
+ "labels": {
+ "template": "datagrid71-postgresql-persistent",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new data grid service (using PostgreSQL with persistent storage) has been created in your project. It supports connector type(s) \"${INFINISPAN_CONNECTORS}\". The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"datagrid-service-account\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "datagrid-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Username",
+ "description": "User name for JDG user.",
+ "name": "USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Password",
+ "description": "The password to access the JDG Caches. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s). (optional)",
+ "name": "PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "datagrid-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/postgresql",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Maximum number of connections",
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Shared Buffers",
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "displayName": "Database Volume Capacity",
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Infinispan Connectors",
+ "description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')",
+ "name": "INFINISPAN_CONNECTORS",
+ "value": "hotrod,memcached,rest",
+ "required": false
+ },
+ {
+ "displayName": "Cache Names",
+ "description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configured for each entry.",
+ "name": "CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datavirt Cache Names",
+ "description": "Comma-separated list of caches to configure for use by Red Hat JBoss Data Virtualization for materialization of views. Three caches will be created for each named cache: <name>, <name>_staging and <name>_alias.",
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Default Cache Type",
+ "description": "Default cache type for all caches. If empty then distributed will be the default",
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Encryption Requires SSL Client Authentication?",
+ "description": "",
+ "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Memcached Cache Name",
+ "description": "The name of the cache to expose through this memcached connector (defaults to 'default')",
+ "name": "MEMCACHED_CACHE",
+ "value": "default",
+ "required": false
+ },
+ {
+ "displayName": "REST Security Domain",
+ "description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint",
+ "name": "REST_SECURITY_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "datagrid-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Cluster Password",
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTP port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTPS port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 11211,
+ "targetPort": 11211
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-memcached",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Memcached service for clustered applications."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 11333,
+ "targetPort": 11333
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-hotrod",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Hot Rod service for clustered applications."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTPS service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-datagrid71-openshift:1.0"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "datagrid-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "jboss-datagrid71-openshift",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "datagrid-keystore-volume",
+ "mountPath": "/etc/datagrid-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "datagrid-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/datagrid/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/datagrid/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ },
+ {
+ "name": "memcached",
+ "containerPort": 11211,
+ "protocol": "TCP"
+ },
+ {
+ "name": "hotrod-internal",
+ "containerPort": 11222,
+ "protocol": "TCP"
+ },
+ {
+ "name": "hotrod",
+ "containerPort": 11333,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "USERNAME",
+ "value": "${USERNAME}"
+ },
+ {
+ "name": "PASSWORD",
+ "value": "${PASSWORD}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/datagrid-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "DEFAULT_JDBC_STORE_TYPE",
+ "value": "string"
+ },
+ {
+ "name": "DEFAULT_JDBC_STORE_DATASOURCE",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "MEMCACHED_JDBC_STORE_TYPE",
+ "value": "string"
+ },
+ {
+ "name": "MEMCACHED_JDBC_STORE_DATASOURCE",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "INFINISPAN_CONNECTORS",
+ "value": "${INFINISPAN_CONNECTORS}"
+ },
+ {
+ "name": "CACHE_NAMES",
+ "value": "${CACHE_NAMES}"
+ },
+ {
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "${DATAVIRT_CACHE_NAMES}"
+ },
+ {
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "${CACHE_TYPE_DEFAULT}"
+ },
+ {
+ "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
+ "value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}"
+ },
+ {
+ "name": "HOTROD_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-hotrod"
+ },
+ {
+ "name": "MEMCACHED_CACHE",
+ "value": "${MEMCACHED_CACHE}"
+ },
+ {
+ "name": "REST_SECURITY_DOMAIN",
+ "value": "${REST_SECURITY_DOMAIN}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "datagrid-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "datagrid-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/pgsql/data",
+ "name": "${APPLICATION_NAME}-postgresql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-postgresql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
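The template message above asks for the "datagrid-service-account" service account and two keystore secrets before deployment. A hedged sketch of producing the secrets with keytool and oc, assuming self-signed certificates are acceptable (for example in a test project); "changeit" is a placeholder password that would be fed back in through the HTTPS_PASSWORD and JGROUPS_ENCRYPT_PASSWORD template parameters:

    # Self-signed HTTPS keystore; keystore.jks matches the HTTPS_KEYSTORE default.
    keytool -genkeypair -alias datagrid -keyalg RSA \
        -keystore keystore.jks -storepass changeit -keypass changeit \
        -dname "CN=secure-datagrid-app.example.com"

    # JCEKS keystore holding the JGroups encryption key
    # (matches the JGROUPS_ENCRYPT_KEYSTORE default).
    keytool -genseckey -alias jgroups -keyalg AES -keysize 128 -storetype JCEKS \
        -keystore jgroups.jceks -storepass changeit -keypass changeit

    # HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET both default to datagrid-app-secret,
    # so a single secret can carry both files.
    oc create secret generic datagrid-app-secret \
        --from-file=keystore.jks --from-file=jgroups.jceks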
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-postgresql.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-postgresql.json
new file mode 100644
index 000000000..0e9fea735
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/datagrid71-postgresql.json
@@ -0,0 +1,783 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for JDG 7.1 and PostgreSQL applications built using.",
+ "tags": "datagrid,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss Data Grid 7.1 + PostgreSQL (Ephemeral with https)"
+ },
+ "name": "datagrid71-postgresql"
+ },
+ "labels": {
+ "template": "datagrid71-postgresql",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new data grid service (using PostgreSQL) has been created in your project. It supports connector type(s) \"${INFINISPAN_CONNECTORS}\". The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"datagrid-service-account\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "datagrid-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Username",
+ "description": "User name for JDG user.",
+ "name": "USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Password",
+ "description": "The password to access the JDG Caches. Must be different than username; must not be root, admin, or administrator; must contain at least 8 characters, 1 alphabetic character(s), 1 digit(s), and 1 non-alphanumeric symbol(s). (optional)",
+ "name": "PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "datagrid-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/postgresql",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Maximum number of connections",
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Shared Buffers",
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Infinispan Connectors",
+ "description": "Comma-separated list of connector types that should be configured (defaults to 'memcached,hotrod,rest')",
+ "name": "INFINISPAN_CONNECTORS",
+ "value": "hotrod,memcached,rest",
+ "required": false
+ },
+ {
+ "displayName": "Cache Names",
+ "description": "Comma-separated list of caches to configure. By default, a distributed-cache, with a mode of SYNC will be configurd for each entry.",
+ "name": "CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datavirt Cache Names",
+ "description": "Comma-separated list of caches to configure for use by Red Hat JBoss Data Virtualization for materialization of views. Three caches will be created for each named cache: <name>, <name>_staging and <name>_alias.",
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Default Cache Type",
+ "description": "Default cache type for all caches. If empty then distributed will be the default",
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Encryption Requires SSL Client Authentication?",
+ "description": "",
+ "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Memcached Cache Name",
+ "description": "The name of the cache to expose through this memcached connector (defaults to 'default')",
+ "name": "MEMCACHED_CACHE",
+ "value": "default",
+ "required": false
+ },
+ {
+ "displayName": "REST Security Domain",
+ "description": "The domain, declared in the security subsystem, that should be used to authenticate access to the REST endpoint",
+ "name": "REST_SECURITY_DOMAIN",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "datagrid-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Cluster Password",
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTP port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTPS port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 11211,
+ "targetPort": 11211
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-memcached",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Memcached service for clustered applications."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 11333,
+ "targetPort": 11333
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-hotrod",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Hot Rod service for clustered applications."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTPS service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-datagrid71-openshift:1.0"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "datagrid-service-account",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "jboss-datagrid71-openshift",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "datagrid-keystore-volume",
+ "mountPath": "/etc/datagrid-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "datagrid-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/datagrid/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/datagrid/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ },
+ {
+ "name": "memcached",
+ "containerPort": 11211,
+ "protocol": "TCP"
+ },
+ {
+ "name": "hotrod-internal",
+ "containerPort": 11222,
+ "protocol": "TCP"
+ },
+ {
+ "name": "hotrod",
+ "containerPort": 11333,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "USERNAME",
+ "value": "${USERNAME}"
+ },
+ {
+ "name": "PASSWORD",
+ "value": "${PASSWORD}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/datagrid-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "DEFAULT_JDBC_STORE_TYPE",
+ "value": "string"
+ },
+ {
+ "name": "DEFAULT_JDBC_STORE_DATASOURCE",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "MEMCACHED_JDBC_STORE_TYPE",
+ "value": "string"
+ },
+ {
+ "name": "MEMCACHED_JDBC_STORE_DATASOURCE",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "INFINISPAN_CONNECTORS",
+ "value": "${INFINISPAN_CONNECTORS}"
+ },
+ {
+ "name": "CACHE_NAMES",
+ "value": "${CACHE_NAMES}"
+ },
+ {
+ "name": "DATAVIRT_CACHE_NAMES",
+ "value": "${DATAVIRT_CACHE_NAMES}"
+ },
+ {
+ "name": "CACHE_TYPE_DEFAULT",
+ "value": "${CACHE_TYPE_DEFAULT}"
+ },
+ {
+ "name": "ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH",
+ "value": "${ENCRYPTION_REQUIRE_SSL_CLIENT_AUTH}"
+ },
+ {
+ "name": "HOTROD_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-hotrod"
+ },
+ {
+ "name": "MEMCACHED_CACHE",
+ "value": "${MEMCACHED_CACHE}"
+ },
+ {
+ "name": "REST_SECURITY_DOMAIN",
+ "value": "${REST_SECURITY_DOMAIN}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "datagrid-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "datagrid-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
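As with the other JDG 7.1 variants, the connector and cache layout is fixed at creation time through template parameters. A brief sketch (local filename and parameter values assumed) that enables only the Hot Rod connector and pre-creates two named caches:

    oc process -f datagrid71-postgresql.json \
        -p APPLICATION_NAME=catalog-grid \
        -p INFINISPAN_CONNECTORS=hotrod \
        -p CACHE_NAMES=products,prices \
      | oc create -f -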
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-amq-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-amq-persistent-s2i.json
new file mode 100644
index 000000000..4917dbe23
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-amq-persistent-s2i.json
@@ -0,0 +1,872 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 7 A-MQ applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.1 + A-MQ (Persistent with https)"
+ },
+ "name": "eap71-amq-persistent-s2i"
+ },
+ "labels": {
+ "template": "eap71-amq-persistent-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new EAP 7 and A-MQ persistent based application with SSL support has been created in your project. The username/password for accessing the A-MQ service is ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "helloworld-mdb",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Volume Size",
+ "description": "Size of the volume used by A-MQ for persisting messages.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "JMS Connection Factory JNDI Name",
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/ConnectionFactory",
+ "name": "MQ_JNDI",
+ "value": "java:/ConnectionFactory",
+ "required": false
+ },
+ {
+ "displayName": "Split Data?",
+ "description": "Split the data directory for each node in a mesh.",
+ "name": "AMQ_SPLIT",
+ "value": "false",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Protocols",
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "HELLOWORLDMDBQueue",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "HELLOWORLDMDBTopic",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Serializable Packages",
+ "description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Service Account Name",
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap7-service-account",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Type",
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Username",
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Password",
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Mesh Discovery Type",
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Storage Limit",
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "JGroups Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Cluster Password",
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Deploy Exploded Archives",
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTP port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTPS port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTPS service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap71-openshift:1.0-TP"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "${MQ_SERIALIZABLE_PACKAGES}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Rolling",
+ "rollingParams": {
+ "maxSurge": 0
+ }
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.4"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/opt/amq/data/kahadb",
+ "name": "${APPLICATION_NAME}-amq-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "${MQ_SERIALIZABLE_PACKAGES}"
+ },
+ {
+ "name": "AMQ_SPLIT",
+ "value": "${AMQ_SPLIT}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-amq-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-amq-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
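Note on the AMQ_MESH_DISCOVERY_TYPE parameter in the template above: it defaults to 'kube', which, per its own description, requires the pod's service account to hold the 'view' role. A minimal sketch of granting that role, assuming the oc CLI and a logged-in session (the namespace value is illustrative):

    import subprocess

    # Grant the 'view' role to the default service account, as the
    # AMQ_MESH_DISCOVERY_TYPE description prescribes for 'kube' discovery.
    namespace = "myproject"  # illustrative; substitute your project namespace
    subprocess.check_call([
        "oc", "policy", "add-role-to-user", "view",
        "system:serviceaccount:{}:default".format(namespace),
    ])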
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-amq-s2i.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-amq-s2i.json
new file mode 100644
index 000000000..8344be836
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-amq-s2i.json
@@ -0,0 +1,817 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 7 A-MQ applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.1 + A-MQ (with https)"
+ },
+ "name": "eap71-amq-s2i"
+ },
+ "labels": {
+ "template": "eap71-amq-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new EAP 7 and A-MQ based application with SSL support has been created in your project. The username/password for accessing the A-MQ service is ${MQ_USERNAME}/${MQ_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts.git",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "helloworld-mdb",
+ "required": false
+ },
+ {
+ "displayName": "JMS Connection Factory JNDI Name",
+ "description": "JNDI name for connection factory used by applications to connect to the broker, e.g. java:/ConnectionFactory",
+ "name": "MQ_JNDI",
+ "value": "java:/ConnectionFactory",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Protocols",
+ "description": "Broker protocols to configure, separated by commas. Allowed values are: `openwire`, `amqp`, `stomp` and `mqtt`. Only `openwire` is supported by EAP.",
+ "name": "MQ_PROTOCOL",
+ "value": "openwire",
+ "required": false
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names, separated by commas. These queues will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_QUEUES",
+ "value": "HELLOWORLDMDBQueue",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names, separated by commas. These topics will be automatically created when the broker starts. Also, they will be made accessible as JNDI resources in EAP.",
+ "name": "MQ_TOPICS",
+ "value": "HELLOWORLDMDBTopic",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Serializable Packages",
+ "description": "List of packages that are allowed to be serialized for use in ObjectMessage, separated by commas. If your app doesn't use ObjectMessages, leave this blank. This is a security enforcement. For the rationale, see http://activemq.apache.org/objectmessage.html",
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Service Account Name",
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap7-service-account",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap7-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Type",
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Username",
+ "description": "User name for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Password",
+ "description": "Password for standard broker user. It is required for connecting to the broker. If left empty, it will be generated.",
+ "name": "MQ_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Mesh Discovery Type",
+ "description": "The discovery agent type to use for discovering mesh endpoints. 'dns' will use OpenShift's DNS service to resolve endpoints. 'kube' will use Kubernetes REST API to resolve service endpoints. If using 'kube' the service account for the pod must have the 'view' role, which can be added via 'oc policy add-role-to-user view system:serviceaccount:<namespace>:default' where <namespace> is the project namespace.",
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "kube",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ Storage Limit",
+ "description": "The A-MQ storage usage limit",
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "100 gb",
+ "required": false
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "JGroups Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Cluster Password",
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Deploy Exploded Archives",
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTP port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's HTTPS port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-amq-tcp\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 61616,
+ "targetPort": 61616
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq-tcp",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The broker's OpenWire port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTP service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's HTTPS service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap71-openshift:1.0-TP"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MQ_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-amq=MQ"
+ },
+ {
+ "name": "MQ_JNDI",
+ "value": "${MQ_JNDI}"
+ },
+ {
+ "name": "MQ_USERNAME",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "MQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "MQ_PROTOCOL",
+ "value": "tcp"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "${MQ_SERIALIZABLE_PACKAGES}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-amq"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-amq-62:1.4"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-amq",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-amq",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-amq",
+ "image": "jboss-amq-62",
+ "imagePullPolicy": "Always",
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/amq/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp",
+ "containerPort": 5672,
+ "protocol": "TCP"
+ },
+ {
+ "name": "amqp-ssl",
+ "containerPort": 5671,
+ "protocol": "TCP"
+ },
+ {
+ "name": "mqtt",
+ "containerPort": 1883,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp",
+ "containerPort": 61613,
+ "protocol": "TCP"
+ },
+ {
+ "name": "stomp-ssl",
+ "containerPort": 61612,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp",
+ "containerPort": 61616,
+ "protocol": "TCP"
+ },
+ {
+ "name": "tcp-ssl",
+ "containerPort": 61617,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "AMQ_USER",
+ "value": "${MQ_USERNAME}"
+ },
+ {
+ "name": "AMQ_PASSWORD",
+ "value": "${MQ_PASSWORD}"
+ },
+ {
+ "name": "AMQ_TRANSPORTS",
+ "value": "${MQ_PROTOCOL}"
+ },
+ {
+ "name": "AMQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "AMQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "MQ_SERIALIZABLE_PACKAGES",
+ "value": "${MQ_SERIALIZABLE_PACKAGES}"
+ },
+ {
+ "name": "AMQ_MESH_DISCOVERY_TYPE",
+ "value": "${AMQ_MESH_DISCOVERY_TYPE}"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAME",
+ "value": "${APPLICATION_NAME}-amq-tcp"
+ },
+ {
+ "name": "AMQ_MESH_SERVICE_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "AMQ_STORAGE_USAGE_LIMIT",
+ "value": "${AMQ_STORAGE_USAGE_LIMIT}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
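Templates such as eap71-amq-s2i.json above are typically instantiated with oc process (or oc new-app -f). The following hypothetical Python helper sketches that flow; the parameter values are illustrative, and any parameter marked required above that lacks a default must be supplied:

    import json
    import subprocess

    def process_template(path, params):
        """Run `oc process` on a template file and return the object list."""
        cmd = ["oc", "process", "-f", path]
        for name, value in params.items():
            # Each -p overrides a template parameter such as APPLICATION_NAME.
            cmd += ["-p", "{}={}".format(name, value)]
        return json.loads(subprocess.check_output(cmd).decode("utf-8"))

    # Illustrative invocation; pipe the result to `oc create -f -` to deploy.
    objects = process_template(
        "eap71-amq-s2i.json",
        {"APPLICATION_NAME": "eap-app", "MQ_QUEUES": "HELLOWORLDMDBQueue"},
    )
    print(json.dumps(objects, indent=2))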
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-basic-s2i.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-basic-s2i.json
new file mode 100644
index 000000000..751bc5650
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-basic-s2i.json
@@ -0,0 +1,389 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for EAP 7 applications built using S2I.",
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.1 (no https)"
+ },
+ "name": "eap71-basic-s2i"
+ },
+ "labels": {
+ "template": "eap71-basic-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new EAP 7 based application has been created in your project.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-developer/jboss-eap-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "7.0.0.GA",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "kitchensink",
+ "required": false
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ cluster password",
+ "description": "A-MQ cluster admin password",
+ "name": "MQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "JGroups Cluster Password",
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Deploy Exploded Archives",
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap71-openshift:1.0-TP"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "MQ_CLUSTER_PASSWORD",
+ "value": "${MQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
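Several parameters in the template above (MQ_CLUSTER_PASSWORD, the webhook secrets, JGROUPS_CLUSTER_PASSWORD) use "generate": "expression" with a "from" pattern, so OpenShift fills them with random values when they are left unset. A simplified sketch of that expansion; the real generator supports a richer regex-like syntax and a secure random source, while this toy version handles only a literal prefix plus one [a-zA-Z0-9]{n} class:

    import random
    import re
    import string

    def generate_from_expression(expr):
        """Expand patterns like 'user[a-zA-Z0-9]{3}' or '[a-zA-Z0-9]{8}'."""
        match = re.match(r"^([A-Za-z0-9]*)\[a-zA-Z0-9\]\{(\d+)\}$", expr)
        if not match:
            raise ValueError("unsupported expression: %s" % expr)
        prefix, count = match.group(1), int(match.group(2))
        alphabet = string.ascii_letters + string.digits
        # Real secret generation should use random.SystemRandom or the
        # secrets module rather than the default PRNG.
        return prefix + "".join(random.choice(alphabet) for _ in range(count))

    print(generate_from_expression("user[a-zA-Z0-9]{3}"))  # e.g. 'userQ7x'
    print(generate_from_expression("[a-zA-Z0-9]{8}"))      # e.g. 'q3FzR0aB'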
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-https-s2i.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-https-s2i.json
new file mode 100644
index 000000000..06ae63685
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-https-s2i.json
@@ -0,0 +1,585 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for EAP 7 applications built using S2I.",
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.1 (with https)"
+ },
+ "name": "eap71-https-s2i"
+ },
+ "labels": {
+ "template": "eap71-https-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new EAP 7 based application with SSL support has been created in your project. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-developer/jboss-eap-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "7.0.0.GA",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "kitchensink",
+ "required": false
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Service Account Name",
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap7-service-account",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap7-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Type",
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ cluster password",
+ "description": "A-MQ cluster admin password",
+ "name": "MQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "JGroups Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Cluster Password",
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Deploy Exploded Archives",
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap71-openshift:1.0-TP"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "MQ_CLUSTER_PASSWORD",
+ "value": "${MQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
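The message in eap71-https-s2i.json above asks the user to create the service account and keystore secrets before deploying. A minimal sketch of that prerequisite setup using the template's default parameter values (eap7-service-account and eap7-app-secret); the local keystore file paths are illustrative placeholders:

    import subprocess

    def oc(*args):
        """Thin wrapper that shells out to the oc CLI."""
        subprocess.check_call(("oc",) + args)

    # Service account referenced by SERVICE_ACCOUNT_NAME.
    oc("create", "serviceaccount", "eap7-service-account")
    # Both HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET default to eap7-app-secret,
    # so one secret holding both keystores satisfies the defaults.
    oc("create", "secret", "generic", "eap7-app-secret",
       "--from-file=keystore.jks=keystore.jks",
       "--from-file=jgroups.jceks=jgroups.jceks")
    # Let the service account mount the secret.
    oc("secrets", "link", "eap7-service-account", "eap7-app-secret")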
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-mongodb-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-mongodb-persistent-s2i.json
new file mode 100644
index 000000000..8f10754ce
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-mongodb-persistent-s2i.json
@@ -0,0 +1,862 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 7 MongoDB applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.1 + MongoDB (Persistent with https)"
+ },
+ "name": "eap71-mongodb-persistent-s2i"
+ },
+ "labels": {
+ "template": "eap71-mongodb-persistent-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new EAP 7 and MongoDB persistent based application with SSL support has been created in your project. The username/password for accessing the MongoDB database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD} (Admin password is \"${DB_ADMIN_PASSWORD}\"). Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Database Volume Capacity",
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Service Account Name",
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap7-service-account",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap7-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Type",
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB No Preallocation",
+ "description": "Disable data file preallocation.",
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Small Files",
+ "description": "Set MongoDB to use a smaller default data file size.",
+ "name": "MONGODB_SMALLFILES",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Quiet",
+ "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
+ "name": "MONGODB_QUIET",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ cluster password",
+ "description": "A-MQ cluster admin password",
+ "name": "MQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database admin password",
+ "description": "Database admin password",
+ "name": "DB_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "JGroups Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Cluster Password",
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Deploy Exploded Archives",
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Image Stream Tag",
+ "description": "The tag to use for the \"mongodb\" image stream. Typically, this aligns with the major.minor version of MongoDB.",
+ "name": "MONGODB_IMAGE_STREAM_TAG",
+ "value": "3.2",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 27017,
+ "targetPort": 27017
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap71-openshift:1.0-TP"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mongodb=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "MQ_CLUSTER_PASSWORD",
+ "value": "${MQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mongodb:${MONGODB_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "image": "mongodb",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mongodb/data",
+ "name": "${APPLICATION_NAME}-mongodb-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_NOPREALLOC",
+ "value": "${MONGODB_NOPREALLOC}"
+ },
+ {
+ "name": "MONGODB_SMALLFILES",
+ "value": "${MONGODB_SMALLFILES}"
+ },
+ {
+ "name": "MONGODB_QUIET",
+ "value": "${MONGODB_QUIET}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mongodb-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
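As a sketch of how a template like the one above is typically consumed (every command, credential, and override value below is an illustrative assumption, not part of this change): parameters declared with "generate": "expression" (DB_PASSWORD, the webhook secrets, and so on) are filled in from their "from" regex at processing time, while the template's message asks for the service account and keystore secret to exist beforehand. With the oc client that might look like:

    # prerequisites named by the template defaults; KUBE_PING clustering
    # typically needs the service account to have view access in the project
    oc create serviceaccount eap7-service-account
    oc policy add-role-to-user view -z eap7-service-account
    # self-signed keystore for the https route (illustrative password and DN)
    keytool -genkeypair -alias server -keyalg RSA -keystore keystore.jks \
      -storepass password -keypass password -dname "CN=eap-app.example.com"
    oc create secret generic eap7-app-secret --from-file=keystore.jks
    # process the template and create the resulting objects
    oc process -f eap71-mongodb-persistent-s2i.json \
      -p APPLICATION_NAME=eap-app -p HTTPS_PASSWORD=password \
      -p VOLUME_CAPACITY=1Gi | oc create -f -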
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-mongodb-s2i.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-mongodb-s2i.json
new file mode 100644
index 000000000..a1d0ac4b4
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-mongodb-s2i.json
@@ -0,0 +1,821 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 7 MongoDB applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.1 + MongoDB (Ephemeral with https)"
+ },
+ "name": "eap71-mongodb-s2i"
+ },
+ "labels": {
+ "template": "eap71-mongodb-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new EAP 7 and MongoDB based application with SSL support has been created in your project. The username/password for accessing the MongoDB database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD} (Admin password is \"${DB_ADMIN_PASSWORD}\"). Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-mongodb",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mongodb",
+ "name": "DB_JNDI",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Service Account Name",
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap7-service-account",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap7-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Type",
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB No Preallocation",
+ "description": "Disable data file preallocation.",
+ "name": "MONGODB_NOPREALLOC",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Small Files",
+ "description": "Set MongoDB to use a smaller default data file size.",
+ "name": "MONGODB_SMALLFILES",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Quiet",
+ "description": "Runs MongoDB in a quiet mode that attempts to limit the amount of output.",
+ "name": "MONGODB_QUIET",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ cluster password",
+ "description": "A-MQ cluster admin password",
+ "name": "MQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database admin password",
+ "description": "Database admin password",
+ "name": "DB_ADMIN_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "JGroups Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Cluster Password",
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Deploy Exploded Archives",
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MongoDB Image Stream Tag",
+ "description": "The tag to use for the \"mongodb\" image stream. Typically, this aligns with the major.minor version of MongoDB.",
+ "name": "MONGODB_IMAGE_STREAM_TAG",
+ "value": "3.2",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mongodb\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 27017,
+ "targetPort": 27017
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap71-openshift:1.0-TP"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mongodb=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "DB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "MQ_CLUSTER_PASSWORD",
+ "value": "${MQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mongodb"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mongodb:${MONGODB_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mongodb",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mongodb",
+ "image": "mongodb",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 27017,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MONGODB_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MONGODB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MONGODB_ADMIN_PASSWORD",
+ "value": "${DB_ADMIN_PASSWORD}"
+ },
+ {
+ "name": "MONGODB_NOPREALLOC",
+ "value": "${MONGODB_NOPREALLOC}"
+ },
+ {
+ "name": "MONGODB_SMALLFILES",
+ "value": "${MONGODB_SMALLFILES}"
+ },
+ {
+ "name": "MONGODB_QUIET",
+ "value": "${MONGODB_QUIET}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
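The ephemeral variant above is the same template minus persistence: its mongodb DeploymentConfig declares no volumeMounts, volumes, or PersistentVolumeClaim, so database contents do not survive a pod restart. Before instantiating either variant, the full parameter list can be inspected with (file name as added in this diff):

    oc process --parameters -f eap71-mongodb-s2i.json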
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-mysql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-mysql-persistent-s2i.json
new file mode 100644
index 000000000..6faabd01e
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-mysql-persistent-s2i.json
@@ -0,0 +1,878 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 7 MySQL applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.1 + MySQL (Persistent with https)"
+ },
+ "name": "eap71-mysql-persistent-s2i"
+ },
+ "labels": {
+ "template": "eap71-mysql-persistent-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new EAP 7 and MySQL persistent based application with SSL support has been created in your project. The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Database Volume Capacity",
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Service Account Name",
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap7-service-account",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap7-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Type",
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Lower Case Table Names",
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Maximum number of connections",
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Minimum Word Length",
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Maximum Word Length",
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL AIO",
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ cluster password",
+ "description": "A-MQ cluster admin password",
+ "name": "MQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "JGroups Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Cluster Password",
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Deploy Exploded Archives",
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap71-openshift:1.0-TP"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "MQ_CLUSTER_PASSWORD",
+ "value": "${MQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ },
+ {
+ "name": "DEFAULT_JOB_REPOSITORY",
+ "value": "${APPLICATION_NAME}-mysql"
+ },
+ {
+ "name": "TIMER_SERVICE_DATA_STORE",
+ "value": "${APPLICATION_NAME}-mysql"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/mysql/data",
+ "name": "${APPLICATION_NAME}-mysql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-mysql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-mysql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
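The MySQL persistent template follows the same shape as the MongoDB one, moving the database service to port 3306, wiring the EJB timer service and batch job repository to the database (TIMER_SERVICE_DATA_STORE, DEFAULT_JOB_REPOSITORY), and exposing MySQL-specific tunables (MYSQL_MAX_CONNECTIONS, MYSQL_FT_MIN_WORD_LEN, etc.) that pass straight through to the database container's environment. A minimal instantiation sketch, with illustrative override values:

    oc process -f eap71-mysql-persistent-s2i.json \
      -p MYSQL_MAX_CONNECTIONS=200 -p MYSQL_LOWER_CASE_TABLE_NAMES=1 \
      | oc create -f -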
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-mysql-s2i.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-mysql-s2i.json
new file mode 100644
index 000000000..9f1c260b3
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-mysql-s2i.json
@@ -0,0 +1,837 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 7 MySQL applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.1 + MySQL (Ephemeral with https)"
+ },
+ "name": "eap71-mysql-s2i"
+ },
+ "labels": {
+ "template": "eap71-mysql-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new EAP 7 and MySQL based application with SSL support has been created in your project. The username/password for accessing the MySQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/mysql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Service Account Name",
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap7-service-account",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap7-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Type",
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Lower Case Table Names",
+ "description": "Sets how the table names are stored and compared.",
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Maximum number of connections",
+ "description": "The maximum permitted number of simultaneous client connections.",
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Minimum Word Length",
+ "description": "The minimum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL FullText Maximum Word Length",
+ "description": "The maximum length of the word to be included in a FULLTEXT index.",
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "required": false
+ },
+ {
+ "displayName": "MySQL AIO",
+ "description": "Controls the innodb_use_native_aio setting value if the native AIO is broken.",
+ "name": "MYSQL_AIO",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ cluster password",
+ "description": "A-MQ cluster admin password",
+ "name": "MQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "JGroups Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Cluster Password",
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Deploy Exploded Archives",
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "MySQL Image Stream Tag",
+ "description": "The tag to use for the \"mysql\" image stream. Typically, this aligns with the major.minor version of MySQL.",
+ "name": "MYSQL_IMAGE_STREAM_TAG",
+ "value": "5.7",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-mysql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 3306,
+ "targetPort": 3306
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap71-openshift:1.0-TP"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-mysql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "MQ_CLUSTER_PASSWORD",
+ "value": "${MQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ },
+ {
+ "name": "DEFAULT_JOB_REPOSITORY",
+ "value": "${APPLICATION_NAME}-mysql"
+ },
+ {
+ "name": "TIMER_SERVICE_DATA_STORE",
+ "value": "${APPLICATION_NAME}-mysql"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-mysql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "mysql:${MYSQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-mysql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-mysql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-mysql",
+ "image": "mysql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 3306,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "MYSQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "MYSQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "MYSQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "MYSQL_LOWER_CASE_TABLE_NAMES",
+ "value": "${MYSQL_LOWER_CASE_TABLE_NAMES}"
+ },
+ {
+ "name": "MYSQL_MAX_CONNECTIONS",
+ "value": "${MYSQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "MYSQL_FT_MIN_WORD_LEN",
+ "value": "${MYSQL_FT_MIN_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_FT_MAX_WORD_LEN",
+ "value": "${MYSQL_FT_MAX_WORD_LEN}"
+ },
+ {
+ "name": "MYSQL_AIO",
+ "value": "${MYSQL_AIO}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
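
Note: as the message field above states, this template does not create the service account or keystore secrets it depends on. A minimal sketch of those prerequisite objects, assuming the default parameter values (eap7-service-account, eap7-app-secret) and placeholder keystore data, would be:

{
  "apiVersion": "v1",
  "kind": "List",
  "items": [
    {
      "apiVersion": "v1",
      "kind": "ServiceAccount",
      "metadata": { "name": "eap7-service-account" }
    },
    {
      "apiVersion": "v1",
      "kind": "Secret",
      "metadata": { "name": "eap7-app-secret" },
      "data": {
        "keystore.jks": "<base64-encoded JKS keystore>",
        "jgroups.jceks": "<base64-encoded JCEKS keystore>"
      }
    }
  ]
}

Because HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET both default to eap7-app-secret, a single secret carrying both keystore files satisfies both volume mounts in the deployment above.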
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-postgresql-persistent-s2i.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-postgresql-persistent-s2i.json
new file mode 100644
index 000000000..2aff9d795
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-postgresql-persistent-s2i.json
@@ -0,0 +1,852 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 7 PostgreSQL applications with persistent storage built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.1 + PostgreSQL (Persistent with https)"
+ },
+ "name": "eap71-postgresql-persistent-s2i"
+ },
+ "labels": {
+ "template": "eap71-postgresql-persistent-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new EAP 7 and PostgreSQL persistent based application with SSL support has been created in your project. The username/password for accessing the PostgreSQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Database Volume Capacity",
+ "description": "Size of persistent storage for database volume.",
+ "name": "VOLUME_CAPACITY",
+ "value": "512Mi",
+ "required": true
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Service Account Name",
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap7-service-account",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap7-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Type",
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Maximum number of connections",
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Shared Buffers",
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ cluster password",
+ "description": "A-MQ cluster admin password",
+ "name": "MQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "JGroups Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Cluster Password",
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Deploy Exploded Archives",
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap71-openshift:1.0-TP"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "MQ_CLUSTER_PASSWORD",
+ "value": "${MQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ },
+ {
+ "name": "DEFAULT_JOB_REPOSITORY",
+ "value": "${APPLICATION_NAME}-postgresql"
+ },
+ {
+ "name": "TIMER_SERVICE_DATA_STORE",
+ "value": "${APPLICATION_NAME}-postgresql"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "volumeMounts": [
+ {
+ "mountPath": "/var/lib/pgsql/data",
+ "name": "${APPLICATION_NAME}-postgresql-pvol"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql-pvol",
+ "persistentVolumeClaim": {
+ "claimName": "${APPLICATION_NAME}-postgresql-claim"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "PersistentVolumeClaim",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql-claim",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "accessModes": [
+ "ReadWriteOnce"
+ ],
+ "resources": {
+ "requests": {
+ "storage": "${VOLUME_CAPACITY}"
+ }
+ }
+ }
+ }
+ ]
+}
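
For reference, processing the claim above with the default parameters (APPLICATION_NAME=eap-app, VOLUME_CAPACITY=512Mi) yields essentially the following object; nothing here is new, it is just the defaults substituted into the template:

{
  "apiVersion": "v1",
  "kind": "PersistentVolumeClaim",
  "metadata": {
    "name": "eap-app-postgresql-claim",
    "labels": {
      "application": "eap-app"
    }
  },
  "spec": {
    "accessModes": [
      "ReadWriteOnce"
    ],
    "resources": {
      "requests": {
        "storage": "512Mi"
      }
    }
  }
}

The ReadWriteOnce access mode matches the Recreate deployment strategy used above: only one PostgreSQL pod mounts the volume at a time.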
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-postgresql-s2i.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-postgresql-s2i.json
new file mode 100644
index 000000000..5bb6aaffb
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-postgresql-s2i.json
@@ -0,0 +1,811 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "description": "Application template for EAP 7 PostgreSQL applications built using S2I.",
+ "iconClass": "icon-jboss",
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.1 + PostgreSQL (Ephemeral with https)"
+ },
+ "name": "eap71-postgresql-s2i"
+ },
+ "labels": {
+ "template": "eap71-postgresql-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new EAP 7 and PostgreSQL based application with SSL support has been created in your project. The username/password for accessing the PostgreSQL database \"${DB_DATABASE}\" is ${DB_USERNAME}/${DB_PASSWORD}. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "1.3",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "todolist/todolist-jdbc",
+ "required": false
+ },
+ {
+ "displayName": "Database JNDI Name",
+ "description": "Database JNDI name used by application to resolve the datasource, e.g. java:/jboss/datasources/postgresql",
+ "name": "DB_JNDI",
+ "value": "java:jboss/datasources/TodoListDS",
+ "required": false
+ },
+ {
+ "displayName": "Database Name",
+ "description": "Database name",
+ "name": "DB_DATABASE",
+ "value": "root",
+ "required": true
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Service Account Name",
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap7-service-account",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap7-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Type",
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Minimum Pool Size",
+ "description": "Sets xa-pool/min-pool-size for the configured datasource.",
+ "name": "DB_MIN_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Maximum Pool Size",
+ "description": "Sets xa-pool/max-pool-size for the configured datasource.",
+ "name": "DB_MAX_POOL_SIZE",
+ "required": false
+ },
+ {
+ "displayName": "Datasource Transaction Isolation",
+ "description": "Sets transaction-isolation for the configured datasource.",
+ "name": "DB_TX_ISOLATION",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Maximum number of connections",
+ "description": "The maximum number of client connections allowed. This also sets the maximum number of prepared transactions.",
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Shared Buffers",
+ "description": "Configures how much memory is dedicated to PostgreSQL for caching data.",
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "required": false
+ },
+ {
+ "displayName": "A-MQ cluster password",
+ "description": "A-MQ cluster admin password",
+ "name": "MQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Username",
+ "description": "Database user name",
+ "name": "DB_USERNAME",
+ "from": "user[a-zA-Z0-9]{3}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Database Password",
+ "description": "Database user password",
+ "name": "DB_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "JGroups Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Cluster Password",
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Deploy Exploded Archives",
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "PostgreSQL Image Stream Tag",
+ "description": "The tag to use for the \"postgresql\" image stream. Typically, this aligns with the major.minor version of PostgreSQL.",
+ "name": "POSTGRESQL_IMAGE_STREAM_TAG",
+ "value": "9.5",
+ "required": true
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port.",
+ "service.alpha.openshift.io/dependencies": "[{\"name\": \"${APPLICATION_NAME}-postgresql\", \"kind\": \"Service\"}]"
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 5432,
+ "targetPort": 5432
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The database server's port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap71-openshift:1.0-TP"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "DB_SERVICE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_JNDI",
+ "value": "${DB_JNDI}"
+ },
+ {
+ "name": "DB_USERNAME",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "DB_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "DB_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "TX_DATABASE_PREFIX_MAPPING",
+ "value": "${APPLICATION_NAME}-postgresql=DB"
+ },
+ {
+ "name": "DB_MIN_POOL_SIZE",
+ "value": "${DB_MIN_POOL_SIZE}"
+ },
+ {
+ "name": "DB_MAX_POOL_SIZE",
+ "value": "${DB_MAX_POOL_SIZE}"
+ },
+ {
+ "name": "DB_TX_ISOLATION",
+ "value": "${DB_TX_ISOLATION}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "MQ_CLUSTER_PASSWORD",
+ "value": "${MQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ },
+ {
+ "name": "DEFAULT_JOB_REPOSITORY",
+ "value": "${APPLICATION_NAME}-postgresql"
+ },
+ {
+ "name": "TIMER_SERVICE_DATA_STORE",
+ "value": "${APPLICATION_NAME}-postgresql"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}-postgresql"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "postgresql:${POSTGRESQL_IMAGE_STREAM_TAG}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}-postgresql",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "terminationGracePeriodSeconds": 60,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}-postgresql",
+ "image": "postgresql",
+ "imagePullPolicy": "Always",
+ "ports": [
+ {
+ "containerPort": 5432,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "POSTGRESQL_USER",
+ "value": "${DB_USERNAME}"
+ },
+ {
+ "name": "POSTGRESQL_PASSWORD",
+ "value": "${DB_PASSWORD}"
+ },
+ {
+ "name": "POSTGRESQL_DATABASE",
+ "value": "${DB_DATABASE}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_CONNECTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_MAX_PREPARED_TRANSACTIONS",
+ "value": "${POSTGRESQL_MAX_CONNECTIONS}"
+ },
+ {
+ "name": "POSTGRESQL_SHARED_BUFFERS",
+ "value": "${POSTGRESQL_SHARED_BUFFERS}"
+ }
+ ]
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
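
Parameters that declare "generate": "expression" in the templates above (DB_USERNAME, DB_PASSWORD, the webhook secrets, and the cluster passwords) are filled in at processing time with a random string matching their "from" expression, so "[a-zA-Z0-9]{8}" yields an eight-character alphanumeric value. A hypothetical processed parameter (the value shown is illustrative only) looks like:

{
  "name": "DB_PASSWORD",
  "from": "[a-zA-Z0-9]{8}",
  "generate": "expression",
  "value": "q7Xr2LbN"
}

This is also why the message field can echo ${DB_USERNAME}/${DB_PASSWORD} back to the user: by the time the message is displayed, the generated values have already been substituted in.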
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-sso-s2i.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-sso-s2i.json
new file mode 100644
index 000000000..a7a347338
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-sso-s2i.json
@@ -0,0 +1,823 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for EAP 6 applications built using S2I, enabled for SSO.",
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.1 + Single Sign-On (with https)"
+ },
+ "name": "eap71-sso-s2i"
+ },
+ "labels": {
+ "template": "eap71-sso-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new EAP 7 based application with SSL and SSO support has been created in your project. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets: \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Hostname for http service route (e.g. eap-app-myproject.example.com). Required for SSO-enabled applications. This is added to the white list of redirects in the SSO server.",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": true
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Hostname for https service route (e.g. secure-eap-app-myproject.example.com). Required for SSO-enabled applications. This is added to the white list of redirects in the SSO server.",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": true
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/redhat-developer/redhat-sso-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "7.0.x-ose",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Queues",
+ "description": "Queue names",
+ "name": "HORNETQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topics",
+ "description": "Topic names",
+ "name": "HORNETQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Service Account Name",
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap7-service-account",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap7-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Type",
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate (e.g. jboss)",
+ "name": "HTTPS_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate (e.g. mykeystorepass)",
+ "name": "HTTPS_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "HornetQ Password",
+ "description": "HornetQ cluster admin password",
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "GitHub trigger secret",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "Generic build trigger secret",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "JGroups Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Certificate Name",
+ "description": "The name associated with the server certificate (e.g. secret-key)",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Password",
+ "description": "The password for the keystore and certificate (e.g. password)",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Cluster Password",
+ "description": "JGroups cluster password",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Deploy Exploded Archives",
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ },
+ {
+ "displayName": "URL for SSO",
+ "description": "The URL for the SSO server (e.g. https://secure-sso-myproject.example.com/auth). This is the URL through which the user will be redirected when a login or token is required by the application.",
+ "name": "SSO_URL",
+ "value": "",
+ "required": true
+ },
+ {
+ "displayName": "URL for SSO (internal service)",
+ "description": "The URL for the internal SSO service, where secure-sso (the default) is the kubernetes service exposed by the SSO server. This is used to create the application client(s) (see SSO_USERNAME). This can also be the same as SSO_URL.",
+ "name": "SSO_SERVICE_URL",
+ "value": "https://secure-sso:8443/auth",
+ "required": false
+ },
+ {
+ "displayName": "SSO Realm",
+ "description": "The SSO realm to which the application client(s) should be associated (e.g. demo).",
+ "name": "SSO_REALM",
+ "value": "",
+ "required": true
+ },
+ {
+ "displayName": "SSO Username",
+ "description": "The username used to access the SSO service. This is used to create the appliction client(s) within the specified SSO realm. This should match the SSO_SERVICE_USERNAME specified through one of the sso70-* templates.",
+ "name": "SSO_USERNAME",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Password",
+ "description": "The password for the SSO service user.",
+ "name": "SSO_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Public Key",
+ "description": "SSO Public Key. Public key is recommended to be passed into the template to avoid man-in-the-middle security vulnerability",
+ "name": "SSO_PUBLIC_KEY",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Bearer Only?",
+ "description": "SSO Client Access Type",
+ "name": "SSO_BEARER_ONLY",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Artifact Directories",
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "app-jee-jsp/target,service-jee-jaxrs/target,app-profile-jee-jsp/target,app-profile-saml-jee-jsp/target",
+ "required": false
+ },
+ {
+ "displayName": "SSO SAML Keystore Secret",
+ "description": "The name of the secret containing the keystore file",
+ "name": "SSO_SAML_KEYSTORE_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "SSO SAML Keystore",
+ "description": "The name of the keystore file within the secret",
+ "name": "SSO_SAML_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "SSO SAML Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "SSO_SAML_CERTIFICATE_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "displayName": "SSO SAML Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "SSO_SAML_KEYSTORE_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "displayName": "SSO Client Secret",
+ "description": "The SSO Client Secret for Confidential Access",
+ "name": "SSO_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Enable CORS for SSO?",
+ "description": "Enable CORS for SSO applications",
+ "name": "SSO_ENABLE_CORS",
+ "value": "false",
+ "required": false
+ },
+ {
+ "displayName": "SSO SAML Logout Page",
+ "description": "SSO logout page for SAML applications",
+ "name": "SSO_SAML_LOGOUT_PAGE",
+ "value": "/",
+ "required": false
+ },
+ {
+ "displayName": "Disable SSL Validation in EAP->SSO communication",
+ "description": "If true SSL communication between EAP and the SSO Server will be insecure (i.e. certificate validation is disabled with curl)",
+ "name": "SSO_DISABLE_SSL_CERTIFICATE_VALIDATION",
+ "value": "true",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store",
+ "description": "The name of the truststore file within the secret (e.g. truststore.jks)",
+ "name": "SSO_TRUSTSTORE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store Password",
+ "description": "The password for the truststore and certificate (e.g. mykeystorepass)",
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "SSO Trust Store Secret",
+ "description": "The name of the secret containing the truststore file (e.g. truststore-secret). Used for volume secretName",
+ "name": "SSO_TRUSTSTORE_SECRET",
+ "value": "eap7-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}"
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap71-openshift:1.0-TP"
+ },
+ "env": [
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ },
+ {
+ "name": "MAVEN_ARGS_APPEND",
+ "value": ""
+ },
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ }
+ ]
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "sso-saml-keystore-volume",
+ "mountPath": "/etc/sso-saml-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "sso-truststore-volume",
+ "mountPath": "/etc/sso-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "HOSTNAME_HTTP",
+ "value": "${HOSTNAME_HTTP}"
+ },
+ {
+ "name": "HOSTNAME_HTTPS",
+ "value": "${HOSTNAME_HTTPS}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_CLUSTER_PASSWORD",
+ "value": "${HORNETQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "HORNETQ_QUEUES",
+ "value": "${HORNETQ_QUEUES}"
+ },
+ {
+ "name": "HORNETQ_TOPICS",
+ "value": "${HORNETQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ },
+ {
+ "name": "SSO_URL",
+ "value": "${SSO_URL}"
+ },
+ {
+ "name": "SSO_SERVICE_URL",
+ "value": "${SSO_SERVICE_URL}"
+ },
+ {
+ "name": "SSO_REALM",
+ "value": "${SSO_REALM}"
+ },
+ {
+ "name": "SSO_USERNAME",
+ "value": "${SSO_USERNAME}"
+ },
+ {
+ "name": "SSO_PASSWORD",
+ "value": "${SSO_PASSWORD}"
+ },
+ {
+ "name": "SSO_PUBLIC_KEY",
+ "value": "${SSO_PUBLIC_KEY}"
+ },
+ {
+ "name": "SSO_BEARER_ONLY",
+ "value": "${SSO_BEARER_ONLY}"
+ },
+ {
+ "name": "SSO_SAML_KEYSTORE_SECRET",
+ "value": "${SSO_SAML_KEYSTORE_SECRET}"
+ },
+ {
+ "name": "SSO_SAML_KEYSTORE",
+ "value": "${SSO_SAML_KEYSTORE}"
+ },
+ {
+ "name": "SSO_SAML_KEYSTORE_DIR",
+ "value": "/etc/sso-saml-secret-volume"
+ },
+ {
+ "name": "SSO_SAML_CERTIFICATE_NAME",
+ "value": "${SSO_SAML_CERTIFICATE_NAME}"
+ },
+ {
+ "name": "SSO_SAML_KEYSTORE_PASSWORD",
+ "value": "${SSO_SAML_KEYSTORE_PASSWORD}"
+ },
+ {
+ "name": "SSO_SECRET",
+ "value": "${SSO_SECRET}"
+ },
+ {
+ "name": "SSO_ENABLE_CORS",
+ "value": "${SSO_ENABLE_CORS}"
+ },
+ {
+ "name": "SSO_SAML_LOGOUT_PAGE",
+ "value": "${SSO_SAML_LOGOUT_PAGE}"
+ },
+ {
+ "name": "SSO_DISABLE_SSL_CERTIFICATE_VALIDATION",
+ "value": "${SSO_DISABLE_SSL_CERTIFICATE_VALIDATION}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE",
+ "value": "${SSO_TRUSTSTORE}"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_DIR",
+ "value": "/etc/sso-secret-volume"
+ },
+ {
+ "name": "SSO_TRUSTSTORE_PASSWORD",
+ "value": "${SSO_TRUSTSTORE_PASSWORD}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "sso-saml-keystore-volume",
+ "secret": {
+ "secretName": "${SSO_SAML_KEYSTORE_SECRET}"
+ }
+ },
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ },
+ {
+ "name": "sso-truststore-volume",
+ "secret": {
+ "secretName": "${SSO_TRUSTSTORE_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
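Note: the HORNETQ_CLUSTER_PASSWORD, webhook, and SSO_SECRET parameters above rely on the template service's "generate": "expression" mechanism: when the template is processed, each such parameter is filled with a random string matching the regular expression given in "from". Below is a minimal Python sketch of that behavior, assuming only the simple "[class]{n}" form used by these templates; the real generator supports a richer expression syntax, and expand_from is a hypothetical helper name.

    import random
    import re

    def expand_from(pattern):
        """Expand a "[class]{n}" expression such as "[a-zA-Z0-9]{8}"."""
        match = re.fullmatch(r"\[([^\]]+)\]\{(\d+)\}", pattern)
        if not match:
            raise ValueError("unsupported expression: %s" % pattern)
        char_class, count = match.group(1), int(match.group(2))
        # Expand ranges like a-z, A-Z, 0-9 into concrete characters.
        chars = re.sub(
            r"(\w)-(\w)",
            lambda m: "".join(chr(c) for c in range(ord(m.group(1)), ord(m.group(2)) + 1)),
            char_class)
        return "".join(random.choice(chars) for _ in range(count))

    print(expand_from("[a-zA-Z0-9]{8}"))  # e.g. "Qm3xT9aZ"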
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-third-party-db-s2i.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-third-party-db-s2i.json
new file mode 100644
index 000000000..6dc63c75f
--- /dev/null
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/eap71-third-party-db-s2i.json
@@ -0,0 +1,657 @@
+{
+ "kind": "Template",
+ "apiVersion": "v1",
+ "metadata": {
+ "annotations": {
+ "iconClass": "icon-jboss",
+ "description": "Application template for EAP 7 DB applications built using S2I. Includes support for installing third-party DB drivers.",
+ "tags": "eap,javaee,java,jboss,xpaas",
+ "version": "1.4.0",
+ "openshift.io/display-name": "Red Hat JBoss EAP 7.1 (with https, supporting third-party DB drivers)"
+ },
+ "name": "eap71-third-party-db-s2i"
+ },
+ "labels": {
+ "template": "eap71-third-party-db-s2i",
+ "xpaas": "1.4.0"
+ },
+ "message": "A new EAP 7 based application with SSL support has been created in your project. Please be sure to create the \"${SERVICE_ACCOUNT_NAME}\" service account and the following secrets:\"${CONFIGURATION_NAME}\" containing the datasource configuration details required by the deployed application(s); \"${HTTPS_SECRET}\" containing the ${HTTPS_KEYSTORE} file used for serving secure content; \"${JGROUPS_ENCRYPT_SECRET}\" containing the ${JGROUPS_ENCRYPT_KEYSTORE} file used for securing JGroups communications.",
+ "parameters": [
+ {
+ "displayName": "Application Name",
+ "description": "The name for the application.",
+ "name": "APPLICATION_NAME",
+ "value": "eap-app",
+ "required": true
+ },
+ {
+ "displayName": "Configuration Secret Name",
+ "description": "The name of the secret containing configuration properties for the datasources.",
+ "name": "CONFIGURATION_NAME",
+ "value": "eap-app-config",
+ "required": true
+ },
+ {
+ "displayName": "Custom http Route Hostname",
+ "description": "Custom hostname for http service route. Leave blank for default hostname, e.g.: <application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTP",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Custom https Route Hostname",
+ "description": "Custom hostname for https service route. Leave blank for default hostname, e.g.: secure-<application-name>-<project>.<default-domain-suffix>",
+ "name": "HOSTNAME_HTTPS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Git Repository URL",
+ "description": "Git source URI for application",
+ "name": "SOURCE_REPOSITORY_URL",
+ "value": "https://github.com/jboss-openshift/openshift-quickstarts",
+ "required": true
+ },
+ {
+ "displayName": "Git Reference",
+ "description": "Git branch/tag reference",
+ "name": "SOURCE_REPOSITORY_REF",
+ "value": "master",
+ "required": false
+ },
+ {
+ "displayName": "Context Directory",
+ "description": "Path within Git project to build; empty for root project directory.",
+ "name": "CONTEXT_DIR",
+ "value": "datavirt/hibernate-webapp",
+ "required": false
+ },
+ {
+ "displayName": "Drivers ImageStreamTag",
+ "description": "ImageStreamTag definition for the image containing the drivers and configuration, e.g. jboss-datavirt63-openshift:1.0-driver",
+ "name": "EXTENSIONS_IMAGE",
+ "value": "jboss-datavirt63-driver-openshift:1.0",
+ "required": true
+ },
+ {
+ "displayName": "Drivers ImageStream Namespace",
+ "description": "Namespace within which the ImageStream definition for the image containing the drivers and configuration is located.",
+ "name": "EXTENSIONS_IMAGE_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "Drivers Image Install Directory",
+ "description": "Full path to the directory within the extensions image where the extensions are located (e.g. install.sh, modules/, etc.)",
+ "name": "EXTENSIONS_INSTALL_DIR",
+ "value": "/extensions",
+ "required": true
+ },
+ {
+ "displayName": "Queue Names",
+ "description": "Queue names to preconfigure within Messaging subsystem.",
+ "name": "MQ_QUEUES",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Topic Names",
+ "description": "Topic names to preconfigure within Messaging subsystem.",
+ "name": "MQ_TOPICS",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Service Account Name",
+ "description": "The name of the service account to use for the deployment. The service account should be configured to allow useage of the secret(s) specified by HTTPS_SECRET and JGROUPS_ENCRYPT_SECRET.",
+ "name": "SERVICE_ACCOUNT_NAME",
+ "value": "eap-service-account",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Secret Name",
+ "description": "The name of the secret containing the keystore file",
+ "name": "HTTPS_SECRET",
+ "value": "eap-app-secret",
+ "required": true
+ },
+ {
+ "displayName": "Server Keystore Filename",
+ "description": "The name of the keystore file within the secret",
+ "name": "HTTPS_KEYSTORE",
+ "value": "keystore.jks",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Type",
+ "description": "The type of the keystore file (JKS or JCEKS)",
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "",
+ "required": false
+ },
+ {
+ "displayName": "Server Certificate Name",
+ "description": "The name associated with the server certificate",
+ "name": "HTTPS_NAME",
+ "value": "jboss",
+ "required": false
+ },
+ {
+ "displayName": "Server Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "HTTPS_PASSWORD",
+ "value": "mykeystorepass",
+ "required": false
+ },
+ {
+ "displayName": "Messaging Cluster Admin Password",
+ "description": "Admin password for Messaging cluster.",
+ "name": "MQ_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Github Webhook Secret",
+ "description": "A secret string used to configure the GitHub webhook.",
+ "name": "GITHUB_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Generic Webhook Secret",
+ "description": "A secret string used to configure the Generic webhook.",
+ "name": "GENERIC_WEBHOOK_SECRET",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "ImageStream Namespace",
+ "description": "Namespace in which the ImageStreams for Red Hat Middleware images are installed. These ImageStreams are normally installed in the openshift namespace. You should only need to modify this if you've installed the ImageStreams in a different namespace/project.",
+ "name": "IMAGE_STREAM_NAMESPACE",
+ "value": "openshift",
+ "required": true
+ },
+ {
+ "displayName": "JGroups Secret Name",
+ "description": "The name of the secret containing the keystore to be used for securing JGroups communications.",
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "eap-app-secret",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Filename",
+ "description": "The name of the keystore file within the JGroups secret.",
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "jgroups.jceks",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Certificate Name",
+ "description": "The name associated with the JGroups server certificate",
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "secret-key",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Keystore Password",
+ "description": "The password for the keystore and certificate",
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "password",
+ "required": false
+ },
+ {
+ "displayName": "JGroups Cluster Password",
+ "description": "Password used by JGroups to authenticate nodes in the cluster.",
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "from": "[a-zA-Z0-9]{8}",
+ "generate": "expression",
+ "required": true
+ },
+ {
+ "displayName": "Deploy Exploded Archives",
+ "description": "Controls whether exploded deployment content should be automatically deployed",
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "false",
+ "required": false
+ },
+ {
+ "displayName": "Maven mirror URL",
+ "description": "Maven mirror to use for S2I builds",
+ "name": "MAVEN_MIRROR_URL",
+ "value": "",
+ "required": false
+ },
+ {
+ "description": "List of directories from which archives will be copied into the deployment folder. If unspecified, all archives in /target will be copied.",
+ "name": "ARTIFACT_DIR",
+ "value": "",
+ "required": false
+ }
+ ],
+ "objects": [
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8080,
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's http port."
+ }
+ }
+ },
+ {
+ "kind": "Service",
+ "apiVersion": "v1",
+ "spec": {
+ "ports": [
+ {
+ "port": 8443,
+ "targetPort": 8443
+ }
+ ],
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ }
+ },
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "The web server's https port."
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-http",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's http service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTP}",
+ "to": {
+ "name": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "Route",
+ "apiVersion": "v1",
+ "id": "${APPLICATION_NAME}-https",
+ "metadata": {
+ "name": "secure-${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ },
+ "annotations": {
+ "description": "Route for application's https service."
+ }
+ },
+ "spec": {
+ "host": "${HOSTNAME_HTTPS}",
+ "to": {
+ "name": "secure-${APPLICATION_NAME}"
+ },
+ "tls": {
+ "termination": "passthrough"
+ }
+ }
+ },
+ {
+ "kind": "ImageStream",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ }
+ },
+ {
+ "kind": "BuildConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "source": {
+ "type": "Git",
+ "git": {
+ "uri": "${SOURCE_REPOSITORY_URL}",
+ "ref": "${SOURCE_REPOSITORY_REF}"
+ },
+ "contextDir": "${CONTEXT_DIR}",
+ "images": [
+ {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${EXTENSIONS_IMAGE_NAMESPACE}",
+ "name": "${EXTENSIONS_IMAGE}"
+ },
+ "paths": [
+ {
+ "destinationDir": "./${CONTEXT_DIR}/extensions/extras",
+ "sourcePath": "${EXTENSIONS_INSTALL_DIR}/."
+ }
+ ]
+ }
+ ]
+ },
+ "strategy": {
+ "type": "Source",
+ "sourceStrategy": {
+ "env": [
+ {
+ "name": "MAVEN_MIRROR_URL",
+ "value": "${MAVEN_MIRROR_URL}"
+ },
+ {
+ "name": "CUSTOM_INSTALL_DIRECTORIES",
+ "value": "extensions/*"
+ },
+ {
+ "name": "ARTIFACT_DIR",
+ "value": "${ARTIFACT_DIR}"
+ }
+ ],
+ "forcePull": true,
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${IMAGE_STREAM_NAMESPACE}",
+ "name": "jboss-eap71-openshift:1.0-TP"
+ }
+ }
+ },
+ "output": {
+ "to": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ },
+ "triggers": [
+ {
+ "type": "GitHub",
+ "github": {
+ "secret": "${GITHUB_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "Generic",
+ "generic": {
+ "secret": "${GENERIC_WEBHOOK_SECRET}"
+ }
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {}
+ },
+ {
+ "type": "ImageChange",
+ "imageChange": {
+ "from": {
+ "kind": "ImageStreamTag",
+ "namespace": "${EXTENSIONS_IMAGE_NAMESPACE}",
+ "name": "${EXTENSIONS_IMAGE}"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ]
+ }
+ },
+ {
+ "kind": "DeploymentConfig",
+ "apiVersion": "v1",
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "strategy": {
+ "type": "Recreate"
+ },
+ "triggers": [
+ {
+ "type": "ImageChange",
+ "imageChangeParams": {
+ "automatic": true,
+ "containerNames": [
+ "${APPLICATION_NAME}"
+ ],
+ "from": {
+ "kind": "ImageStreamTag",
+ "name": "${APPLICATION_NAME}:latest"
+ }
+ }
+ },
+ {
+ "type": "ConfigChange"
+ }
+ ],
+ "replicas": 1,
+ "selector": {
+ "deploymentConfig": "${APPLICATION_NAME}"
+ },
+ "template": {
+ "metadata": {
+ "name": "${APPLICATION_NAME}",
+ "labels": {
+ "deploymentConfig": "${APPLICATION_NAME}",
+ "application": "${APPLICATION_NAME}"
+ }
+ },
+ "spec": {
+ "serviceAccountName": "${SERVICE_ACCOUNT_NAME}",
+ "terminationGracePeriodSeconds": 75,
+ "containers": [
+ {
+ "name": "${APPLICATION_NAME}",
+ "image": "${APPLICATION_NAME}",
+ "imagePullPolicy": "Always",
+ "volumeMounts": [
+ {
+ "name": "configuration",
+ "mountPath": "/etc/eap-environment",
+ "readOnly": true
+ },
+ {
+ "name": "eap-keystore-volume",
+ "mountPath": "/etc/eap-secret-volume",
+ "readOnly": true
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "mountPath": "/etc/jgroups-encrypt-secret-volume",
+ "readOnly": true
+ }
+ ],
+ "lifecycle": {
+ "preStop": {
+ "exec": {
+ "command": [
+ "/opt/eap/bin/jboss-cli.sh",
+ "-c",
+ ":shutdown(timeout=60)"
+ ]
+ }
+ }
+ },
+ "livenessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/livenessProbe.sh"
+ ]
+ }
+ },
+ "readinessProbe": {
+ "exec": {
+ "command": [
+ "/bin/bash",
+ "-c",
+ "/opt/eap/bin/readinessProbe.sh"
+ ]
+ }
+ },
+ "ports": [
+ {
+ "name": "jolokia",
+ "containerPort": 8778,
+ "protocol": "TCP"
+ },
+ {
+ "name": "http",
+ "containerPort": 8080,
+ "protocol": "TCP"
+ },
+ {
+ "name": "https",
+ "containerPort": 8443,
+ "protocol": "TCP"
+ },
+ {
+ "name": "ping",
+ "containerPort": 8888,
+ "protocol": "TCP"
+ }
+ ],
+ "env": [
+ {
+ "name": "OPENSHIFT_KUBE_PING_LABELS",
+ "value": "application=${APPLICATION_NAME}"
+ },
+ {
+ "name": "OPENSHIFT_KUBE_PING_NAMESPACE",
+ "valueFrom": {
+ "fieldRef": {
+ "fieldPath": "metadata.namespace"
+ }
+ }
+ },
+ {
+ "name": "ENV_FILES",
+ "value": "/etc/eap-environment/*"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_DIR",
+ "value": "/etc/eap-secret-volume"
+ },
+ {
+ "name": "HTTPS_KEYSTORE",
+ "value": "${HTTPS_KEYSTORE}"
+ },
+ {
+ "name": "HTTPS_KEYSTORE_TYPE",
+ "value": "${HTTPS_KEYSTORE_TYPE}"
+ },
+ {
+ "name": "HTTPS_NAME",
+ "value": "${HTTPS_NAME}"
+ },
+ {
+ "name": "HTTPS_PASSWORD",
+ "value": "${HTTPS_PASSWORD}"
+ },
+ {
+ "name": "MQ_CLUSTER_PASSWORD",
+ "value": "${MQ_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "MQ_QUEUES",
+ "value": "${MQ_QUEUES}"
+ },
+ {
+ "name": "MQ_TOPICS",
+ "value": "${MQ_TOPICS}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_SECRET",
+ "value": "${JGROUPS_ENCRYPT_SECRET}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE_DIR",
+ "value": "/etc/jgroups-encrypt-secret-volume"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_KEYSTORE",
+ "value": "${JGROUPS_ENCRYPT_KEYSTORE}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_NAME",
+ "value": "${JGROUPS_ENCRYPT_NAME}"
+ },
+ {
+ "name": "JGROUPS_ENCRYPT_PASSWORD",
+ "value": "${JGROUPS_ENCRYPT_PASSWORD}"
+ },
+ {
+ "name": "JGROUPS_CLUSTER_PASSWORD",
+ "value": "${JGROUPS_CLUSTER_PASSWORD}"
+ },
+ {
+ "name": "AUTO_DEPLOY_EXPLODED",
+ "value": "${AUTO_DEPLOY_EXPLODED}"
+ }
+ ]
+ }
+ ],
+ "volumes": [
+ {
+ "name": "configuration",
+ "secret": {
+ "secretName": "${CONFIGURATION_NAME}"
+ }
+ },
+ {
+ "name": "eap-keystore-volume",
+ "secret": {
+ "secretName": "${HTTPS_SECRET}"
+ }
+ },
+ {
+ "name": "eap-jgroups-keystore-volume",
+ "secret": {
+ "secretName": "${JGROUPS_ENCRYPT_SECRET}"
+ }
+ }
+ ]
+ }
+ }
+ }
+ }
+ ]
+}
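Note: in this template the datasource settings are not individual parameters; the "${CONFIGURATION_NAME}" secret is mounted at /etc/eap-environment and ENV_FILES points the image at every file in that directory. Conceptually the container's startup scripts pick those files up as KEY=value environment definitions. The Python sketch below is only an analogy for that idea, assuming simple KEY=value lines — the EAP image actually sources these files in shell, and the DB_USERNAME key shown is illustrative, not a documented variable.

    import glob

    def load_env_files(pattern="/etc/eap-environment/*"):
        """Collect KEY=value pairs from every file matching ENV_FILES."""
        env = {}
        for path in sorted(glob.glob(pattern)):
            with open(path) as handle:
                for line in handle:
                    line = line.strip()
                    # Skip blanks and comments; keep simple KEY=value lines.
                    if line and not line.startswith("#") and "=" in line:
                        key, _, value = line.partition("=")
                        env[key.strip()] = value.strip()
        return env

    # e.g. env = load_env_files(); env.get("DB_USERNAME")  # key name is illustrative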
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/karaf2-camel-amq-template.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/karaf2-camel-amq-template.json
index cd0bec3c1..aad649f84 100644
--- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/karaf2-camel-amq-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/karaf2-camel-amq-template.json
@@ -3,7 +3,7 @@
"kind": "Template",
"metadata": {
"annotations": {
- "description": "Camel route using ActiveMQ in Karaf container.",
+ "description": "Camel route using ActiveMQ in Karaf container. This quickstart shows how to use Camel in a Karaf Container using Blueprint to connect to the A-MQ xPaaS message broker on OpenShift that should already be installed, one simple way to run a A-MQ service is following the documentation of the A-MQ xPaaS image for OpenShift related to the amq62-basic template.",
"tags": "quickstart,java,karaf,fis",
"iconClass": "icon-jboss",
"version": "2.0"
@@ -31,7 +31,7 @@
{
"name": "GIT_REF",
"displayName": "Git Reference",
- "value": "karaf2-camel-amq-1.0.0.redhat-000010",
+ "value": "karaf2-camel-amq-1.0.0.redhat-000019",
"description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
},
{
@@ -49,7 +49,7 @@
{
"name": "APP_VERSION",
"displayName": "Application Version",
- "value": "1.0.0.redhat-000010",
+ "value": "1.0.0.redhat-000019",
"description": "The application version."
},
{
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/karaf2-camel-log-template.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/karaf2-camel-log-template.json
index 2ecce08a9..38b7bc249 100644
--- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/karaf2-camel-log-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/karaf2-camel-log-template.json
@@ -3,7 +3,7 @@
"kind": "Template",
"metadata": {
"annotations": {
- "description": "A simple Camel route in Karaf container.",
+ "description": "A simple Camel route in Karaf container. This quickstart shows a simple Apache Camel application that logs a message to the server log every 5th second.",
"tags": "quickstart,java,karaf,fis",
"iconClass": "icon-jboss",
"version": "2.0"
@@ -31,7 +31,7 @@
{
"name": "GIT_REF",
"displayName": "Git Reference",
- "value": "karaf2-camel-log-1.0.0.redhat-000010",
+ "value": "karaf2-camel-log-1.0.0.redhat-000019",
"description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
},
{
@@ -49,7 +49,7 @@
{
"name": "APP_VERSION",
"displayName": "Application Version",
- "value": "1.0.0.redhat-000010",
+ "value": "1.0.0.redhat-000019",
"description": "The application version."
},
{
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/karaf2-camel-rest-sql-template.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/karaf2-camel-rest-sql-template.json
index d80939efb..6d9573e5b 100644
--- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/karaf2-camel-rest-sql-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/karaf2-camel-rest-sql-template.json
@@ -3,7 +3,7 @@
"kind": "Template",
"metadata": {
"annotations": {
- "description": "Camel example using Rest DSL with SQL Database in Karaf container.",
+ "description": "Camel example using Rest DSL with SQL Database in Karaf container. This example demonstrates how to use SQL via JDBC along with Camel's REST DSL to expose a RESTful API. The OpenShift MySQL container image should already be installed and running on your OpenShift installation, one simple way to run a MySQL service is following the documentation of the Openshift MySQL container image related to the mysql-ephemeral template..",
"tags": "quickstart,java,karaf,fis",
"iconClass": "icon-jboss",
"version": "2.0"
@@ -31,7 +31,7 @@
{
"name": "GIT_REF",
"displayName": "Git Reference",
- "value": "karaf2-camel-rest-sql-1.0.0.redhat-000010",
+ "value": "karaf2-camel-rest-sql-1.0.0.redhat-000019",
"description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
},
{
@@ -73,7 +73,7 @@
{
"name": "APP_VERSION",
"displayName": "Application Version",
- "value": "1.0.0.redhat-000010",
+ "value": "1.0.0.redhat-000019",
"description": "The application version."
},
{
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/karaf2-cxf-rest-template.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/karaf2-cxf-rest-template.json
index f99099868..fdc0c00e5 100644
--- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/karaf2-cxf-rest-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/karaf2-cxf-rest-template.json
@@ -3,7 +3,7 @@
"kind": "Template",
"metadata": {
"annotations": {
- "description": "REST example using CXF in Karaf container.",
+ "description": "REST example using CXF in Karaf container. This quickstart demonstrates how to create a RESTful (JAX-RS) web service using CXF and expose it through the OSGi HTTP Service",
"tags": "quickstart,java,karaf,fis",
"iconClass": "icon-jboss",
"version": "2.0"
@@ -31,7 +31,7 @@
{
"name": "GIT_REF",
"displayName": "Git Reference",
- "value": "karaf2-cxf-rest-1.0.0.redhat-000010",
+ "value": "karaf2-cxf-rest-1.0.0.redhat-000019",
"description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
},
{
@@ -49,7 +49,7 @@
{
"name": "APP_VERSION",
"displayName": "Application Version",
- "value": "1.0.0.redhat-000010",
+ "value": "1.0.0.redhat-000019",
"description": "The application version."
},
{
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-amq-template.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-amq-template.json
index 8b3cd6ed0..2c1a73a29 100644
--- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-amq-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-amq-template.json
@@ -3,7 +3,7 @@
"kind": "Template",
"metadata": {
"annotations": {
- "description": "Spring Boot, Camel and ActiveMQ QuickStart. This quickstart demonstrates how to connect a Spring-Boot application to an ActiveMQ broker and use JMS messaging between two Camel routes using OpenShift. In this example we will use two containers, one container to run as a ActiveMQ broker, and another as a client to the broker, where the Camel routes are running. This quickstart requires the ActiveMQ broker has been deployed and running first.",
+ "description": "Spring Boot, Camel and ActiveMQ QuickStart. This quickstart demonstrates how to connect a Spring-Boot application to an ActiveMQ broker and use JMS messaging between two Camel routes using OpenShift. In this example we will use two containers, one container to run as a ActiveMQ broker, and another as a client to the broker, where the Camel routes are running. This quickstart requires the ActiveMQ broker has been deployed and running first, one simple way to run a A-MQ service is following the documentation of the A-MQ xPaaS image for OpenShift related to the amq62-basic template",
"tags": "quickstart,java,springboot,fis",
"iconClass": "icon-jboss",
"version": "2.0"
@@ -31,7 +31,7 @@
{
"name": "GIT_REF",
"displayName": "Git Reference",
- "value": "spring-boot-camel-amq-1.0.0.redhat-000055",
+ "value": "spring-boot-camel-amq-1.0.0.redhat-000064",
"description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
},
{
@@ -60,7 +60,7 @@
{
"name": "APP_VERSION",
"displayName": "Application Version",
- "value": "1.0.0.redhat-000055",
+ "value": "1.0.0.redhat-000064",
"description": "The application version."
},
{
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-config-template.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-config-template.json
index bc5bbad22..b62e768b6 100644
--- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-config-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-config-template.json
@@ -3,7 +3,7 @@
"kind": "Template",
"metadata": {
"annotations": {
- "description": "Spring Boot and Camel using ConfigMaps and Secrets. This quickstart demonstrates how to configure a Spring-Boot application using Openshift ConfigMaps and Secrets.",
+ "description": "Spring Boot and Camel using ConfigMaps and Secrets. This quickstart demonstrates how to configure a Spring-Boot application using OpenShift ConfigMaps and Secrets. This example requires that a ConfigMap named camel-config and a Secret named camel-config are present in the namespace before the application is deployed, instruction about how to manually create them can be found here: https://github.com/fabric8-quickstarts/spring-boot-camel-config/blob/fis-2.0.x.redhat/README.redhat.md ",
"tags": "quickstart,java,springboot,fis",
"iconClass": "icon-jboss",
"version": "2.0"
@@ -31,7 +31,7 @@
{
"name": "GIT_REF",
"displayName": "Git Reference",
- "value": "spring-boot-camel-config-1.0.0.redhat-000005",
+ "value": "spring-boot-camel-config-1.0.0.redhat-000014",
"description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
},
{
@@ -64,7 +64,7 @@
{
"name": "APP_VERSION",
"displayName": "Application Version",
- "value": "1.0.0.redhat-000005",
+ "value": "1.0.0.redhat-000014",
"description": "The application version."
},
{
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-drools-template.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-drools-template.json
index e54fa0d59..91081e493 100644
--- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-drools-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-drools-template.json
@@ -3,7 +3,7 @@
"kind": "Template",
"metadata": {
"annotations": {
- "description": "Spring-Boot, Camel and JBoss BRMS QuickStart. This example demonstrates how you can use Apache Camel and JBoss BRMS with Spring Boot on OpenShift. DRL files contain simple rules which are used to create knowledge session via Spring configuration file. Camel routes, defined via Spring as well, are then used to e.g. pass (insert) the Body of the message as a POJO to Drools engine for execution.",
+ "description": "Spring-Boot, Camel and JBoss BRMS QuickStart. This example demonstrates how you can use Apache Camel and JBoss BRMS with Spring Boot on OpenShift. DRL files contain simple rules which are used to create knowledge session via Spring configuration file. Camel routes, defined via Spring as well, are then used to e.g. pass (insert) the Body of the message as a POJO to Drools engine for execution. A Kie Server should be deployed and configured before running the application, more information about how to configure it can be found at https://github.com/fabric8-quickstarts/spring-boot-camel-drools/blob/fis-2.0.x.redhat/README.redhat.md",
"tags": "quickstart,java,springboot,fis",
"iconClass": "icon-jboss",
"version": "2.0"
@@ -31,7 +31,7 @@
{
"name": "GIT_REF",
"displayName": "Git Reference",
- "value": "spring-boot-camel-drools-1.0.0.redhat-000054",
+ "value": "spring-boot-camel-drools-1.0.0.redhat-000063",
"description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
},
{
@@ -63,7 +63,7 @@
{
"name": "APP_VERSION",
"displayName": "Application Version",
- "value": "1.0.0.redhat-000054",
+ "value": "1.0.0.redhat-000063",
"description": "The application version."
},
{
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-infinispan-template.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-infinispan-template.json
index 20ba97dac..8d97400ab 100644
--- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-infinispan-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-infinispan-template.json
@@ -3,7 +3,7 @@
"kind": "Template",
"metadata": {
"annotations": {
- "description": "Spring Boot, Camel and JBoss Data Grid QuickStart. This quickstart demonstrates how to connect a Spring-Boot application to a JBoss Data Grid (or Infinispan) server using the Hot Rod protocol. It requires that the data grid server (or cluster) has been deployed first.",
+ "description": "Spring Boot, Camel and JBoss Data Grid QuickStart. This quickstart demonstrates how to connect a Spring-Boot application to a JBoss Data Grid (or Infinispan) server using the Hot Rod protocol. It requires that the data grid server (or cluster) has been deployed first, one simple way to run a JDG service is following the documentation of the JDG xPaaS image for OpenShift related to the datagrid65-basic template.",
"tags": "quickstart,java,springboot,fis",
"iconClass": "icon-jboss",
"version": "2.0"
@@ -31,7 +31,7 @@
{
"name": "GIT_REF",
"displayName": "Git Reference",
- "value": "spring-boot-camel-infinispan-1.0.0.redhat-000024",
+ "value": "spring-boot-camel-infinispan-1.0.0.redhat-000033",
"description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
},
{
@@ -50,7 +50,7 @@
{
"name": "APP_VERSION",
"displayName": "Application Version",
- "value": "1.0.0.redhat-000024",
+ "value": "1.0.0.redhat-000033",
"description": "The application version."
},
{
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-rest-sql-template.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-rest-sql-template.json
index 555647fab..bf722844c 100644
--- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-rest-sql-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-rest-sql-template.json
@@ -3,7 +3,7 @@
"kind": "Template",
"metadata": {
"annotations": {
- "description": "Spring Boot, Camel REST DSL and MySQL QuickStart. This quickstart demonstrates how to connect a Spring Boot application to a MySQL database and expose a REST API with Camel on OpenShift. In this example we will use two containers, one container to run as a MySQL server, and another as a client to the database, where the Camel routes are running. This quickstart requires the MySQL server to be deployed and started first.",
+ "description": "Spring Boot, Camel REST DSL and MySQL QuickStart. This quickstart demonstrates how to connect a Spring Boot application to a MySQL database and expose a REST API with Camel on OpenShift. In this example we will use two containers, one container to run as a MySQL server, and another as a client to the database, where the Camel routes are running. This quickstart requires the MySQL server to be deployed and started first, one simple way to run a MySQL service is following the documentation of the OpenShift MySQL container image related to the mysql-ephemeral template.",
"tags": "quickstart,java,springboot,fis",
"iconClass": "icon-jboss",
"version": "2.0"
@@ -31,7 +31,7 @@
{
"name": "GIT_REF",
"displayName": "Git Reference",
- "value": "spring-boot-camel-rest-sql-1.0.0.redhat-000055",
+ "value": "spring-boot-camel-rest-sql-1.0.0.redhat-000064",
"description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
},
{
@@ -72,7 +72,7 @@
{
"name": "APP_VERSION",
"displayName": "Application Version",
- "value": "1.0.0.redhat-000055",
+ "value": "1.0.0.redhat-000064",
"description": "The application version."
},
{
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-teiid-template.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-teiid-template.json
index cf9a4e903..856264615 100644
--- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-teiid-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-teiid-template.json
@@ -3,7 +3,7 @@
"kind": "Template",
"metadata": {
"annotations": {
- "description": "Spring-Boot, Camel and JBoss Data Virtualization QuickStart. This example demonstrates how to connect Apache Camel to a remote JBoss Data Virtualization (or Teiid) Server using the JDBC protocol.",
+ "description": "Spring-Boot, Camel and JBoss Data Virtualization QuickStart. This example demonstrates how to connect Apache Camel to a remote JBoss Data Virtualization (or Teiid) Server using the JDBC protocol. This quickstart assumes that the JDV server is already running and configured on OpenShift, more information about to setup a JDV server can be found at https://github.com/fabric8-quickstarts/spring-boot-camel-teiid/blob/fis-2.0.x.redhat/README.redhat.md",
"tags": "quickstart,java,springboot,fis",
"iconClass": "icon-jboss",
"version": "2.0"
@@ -31,7 +31,7 @@
{
"name": "GIT_REF",
"displayName": "Git Reference",
- "value": "spring-boot-camel-teiid-1.0.0.redhat-000053",
+ "value": "spring-boot-camel-teiid-1.0.0.redhat-000062",
"description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
},
{
@@ -68,7 +68,7 @@
{
"name": "APP_VERSION",
"displayName": "Application Version",
- "value": "1.0.0.redhat-000053",
+ "value": "1.0.0.redhat-000062",
"description": "The application version."
},
{
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-template.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-template.json
index c78a96f7c..9c0fe287e 100644
--- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-template.json
@@ -31,7 +31,7 @@
{
"name": "GIT_REF",
"displayName": "Git Reference",
- "value": "spring-boot-camel-1.0.0.redhat-000055",
+ "value": "spring-boot-camel-1.0.0.redhat-000064",
"description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
},
{
@@ -43,7 +43,7 @@
{
"name": "APP_VERSION",
"displayName": "Application Version",
- "value": "1.0.0.redhat-000055",
+ "value": "1.0.0.redhat-000064",
"description": "The application version."
},
{
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-xml-template.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-xml-template.json
index 620425902..87c0e347a 100644
--- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-xml-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-camel-xml-template.json
@@ -31,7 +31,7 @@
{
"name": "GIT_REF",
"displayName": "Git Reference",
- "value": "spring-boot-camel-xml-1.0.0.redhat-000055",
+ "value": "spring-boot-camel-xml-1.0.0.redhat-000064",
"description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
},
{
@@ -43,7 +43,7 @@
{
"name": "APP_VERSION",
"displayName": "Application Version",
- "value": "1.0.0.redhat-000055",
+ "value": "1.0.0.redhat-000064",
"description": "The application version."
},
{
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-cxf-jaxrs-template.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-cxf-jaxrs-template.json
index 15cfc93fd..8b0261035 100644
--- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-cxf-jaxrs-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-cxf-jaxrs-template.json
@@ -31,10 +31,16 @@
{
"name": "GIT_REF",
"displayName": "Git Reference",
- "value": "spring-boot-cxf-jaxrs-1.0.0.redhat-000005",
+ "value": "spring-boot-cxf-jaxrs-1.0.0.redhat-000014",
"description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
},
{
+ "name": "SERVICE_NAME",
+ "displayName": "Service Name",
+ "value": "cxf-jaxrs",
+ "description": "Exposed service name."
+ },
+ {
"name": "BUILDER_VERSION",
"displayName": "Builder version",
"value": "2.0",
@@ -43,7 +49,7 @@
{
"name": "APP_VERSION",
"displayName": "Application Version",
- "value": "1.0.0.redhat-000005",
+ "value": "1.0.0.redhat-000014",
"description": "The application version."
},
{
@@ -93,6 +99,59 @@
],
"objects": [
{
+ "apiVersion": "v1",
+ "kind": "Route",
+ "metadata": {
+ "labels": {
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}-route"
+ },
+ "spec": {
+ "to": {
+ "kind": "Service",
+ "name": "${SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Service",
+ "metadata": {
+ "annotations": {
+ },
+ "labels": {
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}"
+ },
+ "spec": {
+ "clusterIP": "None",
+ "deprecatedPublicIPs": [],
+ "ports": [
+ {
+ "port": 9413,
+ "protocol": "TCP",
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "project": "${APP_NAME}",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "group": "quickstarts"
+ }
+ }
+ },
+ {
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
diff --git a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-cxf-jaxws-template.json b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-cxf-jaxws-template.json
index c70ee7726..8b36f5f0b 100644
--- a/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-cxf-jaxws-template.json
+++ b/roles/openshift_examples/files/examples/v3.7/xpaas-templates/spring-boot-cxf-jaxws-template.json
@@ -31,10 +31,16 @@
{
"name": "GIT_REF",
"displayName": "Git Reference",
- "value": "spring-boot-cxf-jaxws-1.0.0.redhat-000005",
+ "value": "spring-boot-cxf-jaxws-1.0.0.redhat-000014",
"description": "Set this to a branch name, tag or other ref of your repository if you are not using the default branch."
},
{
+ "name": "SERVICE_NAME",
+ "displayName": "Service Name",
+ "value": "cxf-jaxws",
+ "description": "Exposed service name."
+ },
+ {
"name": "BUILDER_VERSION",
"displayName": "Builder version",
"value": "2.0",
@@ -43,7 +49,7 @@
{
"name": "APP_VERSION",
"displayName": "Application Version",
- "value": "1.0.0.redhat-000005",
+ "value": "1.0.0.redhat-000014",
"description": "The application version."
},
{
@@ -93,6 +99,59 @@
],
"objects": [
{
+ "apiVersion": "v1",
+ "kind": "Route",
+ "metadata": {
+ "labels": {
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}-route"
+ },
+ "spec": {
+ "to": {
+ "kind": "Service",
+ "name": "${SERVICE_NAME}"
+ }
+ }
+ },
+ {
+ "apiVersion": "v1",
+ "kind": "Service",
+ "metadata": {
+ "annotations": {
+ },
+ "labels": {
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "project": "${APP_NAME}",
+ "version": "${APP_VERSION}",
+ "group": "quickstarts"
+ },
+ "name": "${SERVICE_NAME}"
+ },
+ "spec": {
+ "clusterIP": "None",
+ "deprecatedPublicIPs": [],
+ "ports": [
+ {
+ "port": 9414,
+ "protocol": "TCP",
+ "targetPort": 8080
+ }
+ ],
+ "selector": {
+ "project": "${APP_NAME}",
+ "component": "${APP_NAME}",
+ "provider": "s2i",
+ "group": "quickstarts"
+ }
+ }
+ },
+ {
"kind": "ImageStream",
"apiVersion": "v1",
"metadata": {
diff --git a/roles/openshift_excluder/tasks/install.yml b/roles/openshift_excluder/tasks/install.yml
index 3a866cedf..7a5bebf6f 100644
--- a/roles/openshift_excluder/tasks/install.yml
+++ b/roles/openshift_excluder/tasks/install.yml
@@ -6,19 +6,46 @@
block:
- - name: Install docker excluder
+ - name: Install docker excluder - yum
package:
name: "{{ r_openshift_excluder_service_type }}-docker-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}"
state: "{{ r_openshift_excluder_docker_package_state }}"
when:
- r_openshift_excluder_enable_docker_excluder | bool
+ - ansible_pkg_mgr == "yum"
- - name: Install openshift excluder
+
+ # For DNF we do not need the "*" and if we add it, it causes an error because
+ # it's not a valid pkg_spec
+ #
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1199432
+ - name: Install docker excluder - dnf
+ package:
+ name: "{{ r_openshift_excluder_service_type }}-docker-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ state: "{{ r_openshift_excluder_docker_package_state }}"
+ when:
+ - r_openshift_excluder_enable_docker_excluder | bool
+ - ansible_pkg_mgr == "dnf"
+
+ - name: Install openshift excluder - yum
package:
name: "{{ r_openshift_excluder_service_type }}-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) + '*' }}"
state: "{{ r_openshift_excluder_package_state }}"
when:
- r_openshift_excluder_enable_openshift_excluder | bool
+ - ansible_pkg_mgr == "yum"
+
+ # For DNF we do not need the "*" and if we add it, it causes an error because
+ # it's not a valid pkg_spec
+ #
+ # https://bugzilla.redhat.com/show_bug.cgi?id=1199432
+ - name: Install openshift excluder - dnf
+ package:
+ name: "{{ r_openshift_excluder_service_type }}-excluder{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ state: "{{ r_openshift_excluder_package_state }}"
+ when:
+ - r_openshift_excluder_enable_openshift_excluder | bool
+ - ansible_pkg_mgr == "dnf"
- set_fact:
r_openshift_excluder_install_ran: True
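
To make the duplication above concrete: the only difference between the yum and dnf task pairs is the trailing wildcard on the package spec. A minimal sketch of that difference (the helper name and values here are illustrative, not part of the role):

```python
def excluder_pkg_spec(service_type, version_suffix, pkg_mgr):
    """Build the excluder package spec; yum accepts a trailing '*',
    but dnf rejects it as an invalid pkg_spec (see the BZ link above)."""
    base = "{}-docker-excluder{}".format(service_type, version_suffix)
    return base + "*" if pkg_mgr == "yum" else base

print(excluder_pkg_spec("origin", "-3.7.0", "yum"))  # origin-docker-excluder-3.7.0*
print(excluder_pkg_spec("origin", "-3.7.0", "dnf"))  # origin-docker-excluder-3.7.0
```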
diff --git a/roles/openshift_facts/library/openshift_facts.py b/roles/openshift_facts/library/openshift_facts.py
index 215ff4b72..33028fea4 100755
--- a/roles/openshift_facts/library/openshift_facts.py
+++ b/roles/openshift_facts/library/openshift_facts.py
@@ -498,6 +498,20 @@ def set_selectors(facts):
facts['hosted']['etcd'] = {}
if 'selector' not in facts['hosted']['etcd'] or facts['hosted']['etcd']['selector'] in [None, 'None']:
facts['hosted']['etcd']['selector'] = None
+ if 'prometheus' not in facts:
+ facts['prometheus'] = {}
+ if 'selector' not in facts['prometheus'] or facts['prometheus']['selector'] in [None, 'None']:
+ facts['prometheus']['selector'] = None
+ if 'alertmanager' not in facts['prometheus']:
+ facts['prometheus']['alertmanager'] = {}
+ # pylint: disable=line-too-long
+ if 'selector' not in facts['prometheus']['alertmanager'] or facts['prometheus']['alertmanager']['selector'] in [None, 'None']:
+ facts['prometheus']['alertmanager']['selector'] = None
+ if 'alertbuffer' not in facts['prometheus']:
+ facts['prometheus']['alertbuffer'] = {}
+ # pylint: disable=line-too-long
+ if 'selector' not in facts['prometheus']['alertbuffer'] or facts['prometheus']['alertbuffer']['selector'] in [None, 'None']:
+ facts['prometheus']['alertbuffer']['selector'] = None
return facts
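
The prometheus additions repeat one defaulting idiom: create the nested dict if absent, then coerce a missing selector, None, or the string 'None' to None. A standalone sketch of that idiom (the function name is illustrative):

```python
def default_selector(facts, *path):
    """Walk/create the nested dicts, then normalize the selector value."""
    node = facts
    for key in path:
        node = node.setdefault(key, {})
    # .get() returns None for a missing key, so this also covers 'not in'
    if node.get('selector') in (None, 'None'):
        node['selector'] = None
    return facts

facts = {'prometheus': {'alertmanager': {'selector': 'None'}}}
default_selector(facts, 'prometheus', 'alertmanager')
assert facts['prometheus']['alertmanager']['selector'] is None
```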
@@ -1779,7 +1793,8 @@ class OpenShiftFacts(object):
'node',
'logging',
'loggingops',
- 'metrics']
+ 'metrics',
+ 'prometheus']
# Disabling too-many-arguments, this should be cleaned up as a TODO item.
# pylint: disable=too-many-arguments,no-value-for-parameter
@@ -1907,7 +1922,6 @@ class OpenShiftFacts(object):
portal_net='172.30.0.0/16',
client_binary='oc', admin_binary='oadm',
dns_domain='cluster.local',
- debug_level=2,
config_base='/etc/origin')
if 'master' in roles:
@@ -2069,6 +2083,66 @@ class OpenShiftFacts(object):
)
)
+ defaults['prometheus'] = dict(
+ storage=dict(
+ kind=None,
+ volume=dict(
+ name='prometheus',
+ size='10Gi'
+ ),
+ nfs=dict(
+ directory='/exports',
+ options='*(rw,root_squash)'
+ ),
+ host=None,
+ access=dict(
+ modes=['ReadWriteOnce']
+ ),
+ create_pv=True,
+ create_pvc=False
+ )
+ )
+
+ defaults['prometheus']['alertmanager'] = dict(
+ storage=dict(
+ kind=None,
+ volume=dict(
+ name='prometheus-alertmanager',
+ size='10Gi'
+ ),
+ nfs=dict(
+ directory='/exports',
+ options='*(rw,root_squash)'
+ ),
+ host=None,
+ access=dict(
+ modes=['ReadWriteOnce']
+ ),
+ create_pv=True,
+ create_pvc=False
+ )
+ )
+
+ defaults['prometheus']['alertbuffer'] = dict(
+ storage=dict(
+ kind=None,
+ volume=dict(
+ name='prometheus-alertbuffer',
+ size='10Gi'
+ ),
+ nfs=dict(
+ directory='/exports',
+ options='*(rw,root_squash)'
+ ),
+ host=None,
+ access=dict(
+ modes=['ReadWriteOnce']
+ ),
+ create_pv=True,
+ create_pvc=False
+ )
+ )
+
return defaults
def guess_host_provider(self):
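
The three storage default blocks above differ only in the volume name; a hedged sketch of how they could collapse into one builder (a possible refactor, not what the module does):

```python
def prometheus_storage_defaults(volume_name):
    return dict(
        storage=dict(
            kind=None,
            volume=dict(name=volume_name, size='10Gi'),
            nfs=dict(directory='/exports', options='*(rw,root_squash)'),
            host=None,
            access=dict(modes=['ReadWriteOnce']),
            create_pv=True,
            create_pvc=False,
        )
    )

defaults = {'prometheus': prometheus_storage_defaults('prometheus')}
defaults['prometheus']['alertmanager'] = prometheus_storage_defaults('prometheus-alertmanager')
defaults['prometheus']['alertbuffer'] = prometheus_storage_defaults('prometheus-alertbuffer')
```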
diff --git a/roles/openshift_gcp/templates/provision.j2.sh b/roles/openshift_gcp/templates/provision.j2.sh
index d72a11de1..4d150bc74 100644
--- a/roles/openshift_gcp/templates/provision.j2.sh
+++ b/roles/openshift_gcp/templates/provision.j2.sh
@@ -125,10 +125,11 @@ fi ) &
if ! gcloud --project "{{ openshift_gcp_project }}" compute instance-templates describe "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" &>/dev/null; then
gcloud --project "{{ openshift_gcp_project }}" compute instance-templates create "{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}" \
--machine-type "{{ node_group.machine_type }}" --network "{{ openshift_gcp_network_name }}" \
- --tags "{{ openshift_gcp_prefix }}ocp,ocp,{{ node_group.tags }}" \
+ --tags "{{ openshift_gcp_prefix }}ocp,ocp,{{ 'ocp-bootstrap,' if (node_group.bootstrap | default(False)) else '' }}{{ node_group.tags }}" \
--boot-disk-size "{{ node_group.boot_disk_size }}" --boot-disk-type "pd-ssd" \
--scopes "logging-write,monitoring-write,useraccounts-ro,service-control,service-management,storage-ro,compute-rw" \
- --image "${image}" ${metadata}
+ --image "{{ node_group.image | default('${image}') }}" ${metadata} \
+ --metadata "bootstrap={{ node_group.bootstrap | default(False) | bool | to_json }},cluster-id={{ openshift_gcp_prefix + openshift_gcp_clusterid }},node-group={{ node_group.name }}"
else
echo "Instance template '{{ openshift_gcp_prefix }}instance-template-{{ node_group.name }}' already exists"
fi
@@ -312,8 +313,12 @@ fi
# wait until all node groups are stable
{% for node_group in openshift_gcp_node_group_config %}
+{% if node_group.wait_for_stable | default(False) or not (node_group.bootstrap | default(False)) %}
# wait for stable {{ node_group.name }}
-( gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed wait-until-stable "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --timeout=300) &
+( gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed wait-until-stable "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --timeout=600 ) &
+{% else %}
+# not waiting for {{ node_group.name }} due to bootstrapping
+{% endif %}
{% endfor %}
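
Rendered out of Jinja, the bootstrap logic above amounts to two small rules: bootstrap groups gain an extra ocp-bootstrap tag, and they are skipped by wait-until-stable unless wait_for_stable is set. An illustrative Python restatement (node_group keys mirror the template variables):

```python
def instance_tags(prefix, node_group):
    extra = 'ocp-bootstrap,' if node_group.get('bootstrap', False) else ''
    return "{}ocp,ocp,{}{}".format(prefix, extra, node_group['tags'])

def should_wait_for_stable(node_group):
    return (node_group.get('wait_for_stable', False)
            or not node_group.get('bootstrap', False))

ng = {'name': 'bootstrap', 'tags': 'ocp-node', 'bootstrap': True}
print(instance_tags('mycluster-', ng))  # mycluster-ocp,ocp,ocp-bootstrap,ocp-node
print(should_wait_for_stable(ng))       # False
```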
diff --git a/roles/openshift_gcp/templates/remove.j2.sh b/roles/openshift_gcp/templates/remove.j2.sh
index a1e0affec..c9213b800 100644
--- a/roles/openshift_gcp/templates/remove.j2.sh
+++ b/roles/openshift_gcp/templates/remove.j2.sh
@@ -37,7 +37,7 @@ function teardown() {
# scale down {{ node_group.name }}
(
# performs a delete and scale down as one operation to ensure maximum parallelism
- if ! instances=$( gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed list-instances "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --format='value[terminator=","](instance)' ); then
+ if ! instances=$( gcloud --project "{{ openshift_gcp_project }}" compute instance-groups managed list-instances "{{ openshift_gcp_prefix }}ig-{{ node_group.suffix }}" --zone "{{ openshift_gcp_zone }}" --format='value[terminator=","](instance)' 2>/dev/null ); then
exit 0
fi
instances="${instances%?}"
@@ -59,6 +59,21 @@ if gsutil ls -p "{{ openshift_gcp_project }}" "gs://{{ openshift_gcp_registry_bu
fi
) &
+# Project metadata prefixed with {{ openshift_gcp_prefix }}
+(
+ for key in $( gcloud --project "{{ openshift_gcp_project }}" compute project-info describe --flatten=commonInstanceMetadata.items[] '--format=value(commonInstanceMetadata.items.key)' ); do
+ if [[ "${key}" == "{{ openshift_gcp_prefix }}"* ]]; then
+ gcloud --project "{{ openshift_gcp_project }}" compute project-info remove-metadata "--keys=${key}"
+ fi
+ done
+) &
+
+# Instances and disks used for image building
+(
+ teardown "{{ openshift_gcp_prefix }}build-image-instance" compute instances --zone "{{ openshift_gcp_zone }}"
+ teardown "{{ openshift_gcp_prefix }}build-image-instance" compute disks --zone "{{ openshift_gcp_zone }}"
+) &
+
# DNS
(
dns_zone="{{ dns_managed_zone | default(openshift_gcp_prefix + 'managed-zone') }}"
@@ -152,5 +167,12 @@ for i in `jobs -p`; do wait $i; done
for i in `jobs -p`; do wait $i; done
+# Images specifically located under this cluster prefix family
+for name in $( gcloud --project "{{ openshift_gcp_project }}" compute images list "--filter=family={{ openshift_gcp_prefix }}images" '--format=value(name)' ); do
+ ( gcloud --project "{{ openshift_gcp_project }}" compute images delete "${name}" ) &
+done
+
# Network
-teardown "{{ openshift_gcp_network_name }}" compute networks
+( teardown "{{ openshift_gcp_network_name }}" compute networks ) &
+
+for i in `jobs -p`; do wait $i; done
\ No newline at end of file
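
The removal script's structure is worth spelling out: each teardown runs in a background subshell `( ... ) &`, and the `jobs -p`/`wait` loops act as barriers. A rough Python analogue of that fan-out/barrier pattern (purely illustrative, not part of the role):

```python
from concurrent.futures import ThreadPoolExecutor

def teardown(name):
    # stand-in for `gcloud ... delete "${name}"`
    print("deleting", name)

with ThreadPoolExecutor() as pool:
    # submit() plays the role of `( ... ) &`; leaving the `with` block
    # joins all workers, like the final `wait` loop in the script.
    for resource in ["image-1", "image-2", "network"]:
        pool.submit(teardown, resource)
```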
diff --git a/roles/openshift_health_checker/action_plugins/openshift_health_check.py b/roles/openshift_health_checker/action_plugins/openshift_health_check.py
index 326176273..3ee3b132c 100644
--- a/roles/openshift_health_checker/action_plugins/openshift_health_check.py
+++ b/roles/openshift_health_checker/action_plugins/openshift_health_check.py
@@ -101,7 +101,8 @@ class ActionModule(ActionBase):
execute_module=self._execute_module,
tmp=tmp,
task_vars=task_vars,
- want_full_results=want_full_results
+ want_full_results=want_full_results,
+ templar=self._templar
)
return known_checks
diff --git a/roles/openshift_health_checker/openshift_checks/__init__.py b/roles/openshift_health_checker/openshift_checks/__init__.py
index ce05b44a4..b7b16e0ea 100644
--- a/roles/openshift_health_checker/openshift_checks/__init__.py
+++ b/roles/openshift_health_checker/openshift_checks/__init__.py
@@ -65,12 +65,15 @@ class OpenShiftCheck(object):
If the check can gather logs, tarballs, etc., do so when True; but no need to spend
the time if they're not wanted (won't be written to output directory).
"""
-
- def __init__(self, execute_module=None, task_vars=None, tmp=None, want_full_results=False):
+ # pylint: disable=too-many-arguments
+ def __init__(self, execute_module=None, task_vars=None, tmp=None, want_full_results=False,
+ templar=None):
# store a method for executing ansible modules from the check
self._execute_module = execute_module
# the task variables and tmpdir passed into the health checker task
self.task_vars = task_vars or {}
+ # We may need to template some task_vars
+ self._templar = templar
self.tmp = tmp
# a boolean for disabling the gathering of results (files, computations) that won't
# actually be recorded/used
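
Why a check needs the templar: task_vars can hold unexpanded Jinja expressions (for example a vaulted oreg_auth_password), and get_var() alone would hand back the literal template string. A toy stand-in for that behavior (FakeTemplar is illustrative; the real object is Ansible's Templar):

```python
class FakeTemplar(object):
    """Toy templating of '{{ name }}' lookups only."""
    def __init__(self, variables):
        self.variables = variables

    def template(self, value):
        if value.startswith("{{") and value.endswith("}}"):
            return self.variables[value[2:-2].strip()]
        return value

templar = FakeTemplar({"vault_oreg_password": "s3cret"})
print(templar.template("{{ vault_oreg_password }}"))  # s3cret
print(templar.template("plain-password"))             # plain-password
```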
diff --git a/roles/openshift_health_checker/openshift_checks/disk_availability.py b/roles/openshift_health_checker/openshift_checks/disk_availability.py
index cdf56e959..87e6146d4 100644
--- a/roles/openshift_health_checker/openshift_checks/disk_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/disk_availability.py
@@ -1,6 +1,7 @@
"""Check that there is enough disk space in predefined paths."""
import tempfile
+import os.path
from openshift_checks import OpenShiftCheck, OpenShiftCheckException
@@ -15,31 +16,31 @@ class DiskAvailability(OpenShiftCheck):
# https://docs.openshift.org/latest/install_config/install/prerequisites.html#system-requirements
recommended_disk_space_bytes = {
'/var': {
- 'masters': 40 * 10**9,
- 'nodes': 15 * 10**9,
- 'etcd': 20 * 10**9,
+ 'oo_masters_to_config': 40 * 10**9,
+ 'oo_nodes_to_config': 15 * 10**9,
+ 'oo_etcd_to_config': 20 * 10**9,
},
# Used to copy client binaries into,
# see roles/openshift_cli/library/openshift_container_binary_sync.py.
'/usr/local/bin': {
- 'masters': 1 * 10**9,
- 'nodes': 1 * 10**9,
- 'etcd': 1 * 10**9,
+ 'oo_masters_to_config': 1 * 10**9,
+ 'oo_nodes_to_config': 1 * 10**9,
+ 'oo_etcd_to_config': 1 * 10**9,
},
# Used as temporary storage in several cases.
tempfile.gettempdir(): {
- 'masters': 1 * 10**9,
- 'nodes': 1 * 10**9,
- 'etcd': 1 * 10**9,
+ 'oo_masters_to_config': 1 * 10**9,
+ 'oo_nodes_to_config': 1 * 10**9,
+ 'oo_etcd_to_config': 1 * 10**9,
},
}
# recommended disk space for each location under an upgrade context
recommended_disk_upgrade_bytes = {
'/var': {
- 'masters': 10 * 10**9,
- 'nodes': 5 * 10 ** 9,
- 'etcd': 5 * 10 ** 9,
+ 'oo_masters_to_config': 10 * 10**9,
+ 'oo_nodes_to_config': 5 * 10 ** 9,
+ 'oo_etcd_to_config': 5 * 10 ** 9,
},
}
@@ -61,9 +62,9 @@ class DiskAvailability(OpenShiftCheck):
number = float(user_config)
user_config = {
'/var': {
- 'masters': number,
- 'nodes': number,
- 'etcd': number,
+ 'oo_masters_to_config': number,
+ 'oo_nodes_to_config': number,
+ 'oo_etcd_to_config': number,
},
}
except TypeError:
@@ -121,11 +122,21 @@ class DiskAvailability(OpenShiftCheck):
return {}
+ def find_ansible_submounts(self, path):
+ """Return a list of ansible_mounts that are below the given path."""
+ base = os.path.join(path, "")
+ return [
+ mount
+ for mount in self.get_var("ansible_mounts")
+ if mount["mount"].startswith(base)
+ ]
+
def free_bytes(self, path):
"""Return the size available in path based on ansible_mounts."""
+ submounts = sum(mnt.get('size_available', 0) for mnt in self.find_ansible_submounts(path))
mount = self.find_ansible_mount(path)
try:
- return mount['size_available']
+ return mount['size_available'] + submounts
except KeyError:
raise OpenShiftCheckException(
'Unable to retrieve disk availability for "{path}".\n'
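
A worked example of the new submount arithmetic, using fake ansible_mounts and a simplified mount lookup (the real check resolves the base mount via find_ansible_mount):

```python
import os.path

ansible_mounts = [
    {"mount": "/", "size_available": 2 * 10**9},
    {"mount": "/var", "size_available": 10 * 10**9},
    {"mount": "/var/lib/docker", "size_available": 20 * 10**9},
]

def free_bytes(path):
    base = os.path.join(path, "")  # "/var/" so "/var2" does not match
    submounts = sum(m["size_available"] for m in ansible_mounts
                    if m["mount"].startswith(base))
    # simplified stand-in for find_ansible_mount: longest matching prefix
    mount = max((m for m in ansible_mounts if path.startswith(m["mount"])),
                key=lambda m: len(m["mount"]))
    return mount["size_available"] + submounts

print(free_bytes("/var"))  # 30000000000: /var plus /var/lib/docker
```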
diff --git a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
index fa07c1dde..5beb20503 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_image_availability.py
@@ -1,5 +1,6 @@
"""Check that required Docker images are available."""
+from pipes import quote
from ansible.module_utils import six
from openshift_checks import OpenShiftCheck
from openshift_checks.mixins import DockerHostMixin
@@ -33,10 +34,44 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
# we use python-docker-py to check local docker for images, and skopeo
# to look for images available remotely without waiting to pull them.
dependencies = ["python-docker-py", "skopeo"]
- skopeo_img_check_command = "timeout 10 skopeo inspect --tls-verify=false docker://{registry}/{image}"
+ # command for checking if remote registries have an image, without docker pull
+ skopeo_command = "timeout 10 skopeo inspect --tls-verify={tls} {creds} docker://{registry}/{image}"
+ skopeo_example_command = "skopeo inspect [--tls-verify=false] [--creds=<user>:<pass>] docker://<registry>/<image>"
def __init__(self, *args, **kwargs):
super(DockerImageAvailability, self).__init__(*args, **kwargs)
+
+ self.registries = dict(
+ # set of registries that need to be checked insecurely (note: not accounting for CIDR entries)
+ insecure=set(self.ensure_list("openshift_docker_insecure_registries")),
+ # set of registries that should never be queried even if given in the image
+ blocked=set(self.ensure_list("openshift_docker_blocked_registries")),
+ )
+
+ # ordered list of registries (according to inventory vars) that docker will try for unscoped images
+ regs = self.ensure_list("openshift_docker_additional_registries")
+ # currently one of these registries is added whether the user wants it or not.
+ deployment_type = self.get_var("openshift_deployment_type")
+ if deployment_type == "origin" and "docker.io" not in regs:
+ regs.append("docker.io")
+ elif deployment_type == 'openshift-enterprise' and "registry.access.redhat.com" not in regs:
+ regs.append("registry.access.redhat.com")
+ self.registries["configured"] = regs
+
+ # for the oreg_url registry there may be credentials specified
+ components = self.get_var("oreg_url", default="").split('/')
+ self.registries["oreg"] = "" if len(components) < 3 else components[0]
+
+ # Retrieve and template registry credentials, if provided
+ self.skopeo_command_creds = ""
+ oreg_auth_user = self.get_var('oreg_auth_user', default='')
+ oreg_auth_password = self.get_var('oreg_auth_password', default='')
+ if oreg_auth_user != '' and oreg_auth_password != '':
+ if self._templar is not None:
+ oreg_auth_user = self._templar.template(oreg_auth_user)
+ oreg_auth_password = self._templar.template(oreg_auth_password)
+ self.skopeo_command_creds = "--creds={}:{}".format(quote(oreg_auth_user), quote(oreg_auth_password))
+
# record whether we could reach a registry or not (and remember results)
self.reachable_registries = {}
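
Putting the pieces of the new constructor together, the command the check ends up running looks like this (registry, image, and credentials below are made-up values):

```python
from pipes import quote  # an alias of shlex.quote on Python 3

skopeo_command = ("timeout 10 skopeo inspect --tls-verify={tls} {creds} "
                  "docker://{registry}/{image}")
insecure_registries = {"registry.example.local"}
creds = "--creds={}:{}".format(quote("user"), quote("p@ss w0rd"))

args = dict(registry="registry.example.local", image="openshift3/ose-pod:v3.7")
args["tls"] = "false" if args["registry"] in insecure_registries else "true"
args["creds"] = creds  # only set when this registry is the oreg_url registry
print(skopeo_command.format(**args))
# timeout 10 skopeo inspect --tls-verify=false --creds=user:'p@ss w0rd'
#     docker://registry.example.local/openshift3/ose-pod:v3.7
```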
@@ -62,26 +97,25 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
if not missing_images:
return {}
- registries = self.known_docker_registries()
- if not registries:
- return {"failed": True, "msg": "Unable to retrieve any docker registries."}
-
- available_images = self.available_images(missing_images, registries)
+ available_images = self.available_images(missing_images)
unavailable_images = set(missing_images) - set(available_images)
if unavailable_images:
- registries = [
- reg if self.reachable_registries.get(reg, True) else reg + " (unreachable)"
- for reg in registries
- ]
+ unreachable = [reg for reg, reachable in self.reachable_registries.items() if not reachable]
+ unreachable_msg = "Failed connecting to: {}\n".format(", ".join(unreachable))
+ blocked_msg = "Blocked registries: {}\n".format(", ".join(self.registries["blocked"]))
msg = (
- "One or more required Docker images are not available:\n {}\n"
- "Configured registries: {}\n"
- "Checked by: {}"
+ "One or more required container images are not available:\n {missing}\n"
+ "Checked with: {cmd}\n"
+ "Default registries searched: {registries}\n"
+ "{blocked}"
+ "{unreachable}"
).format(
- ",\n ".join(sorted(unavailable_images)),
- ", ".join(registries),
- self.skopeo_img_check_command
+ missing=",\n ".join(sorted(unavailable_images)),
+ cmd=self.skopeo_example_command,
+ registries=", ".join(self.registries["configured"]),
+ blocked=blocked_msg if self.registries["blocked"] else "",
+ unreachable=unreachable_msg if unreachable else "",
)
return dict(failed=True, msg=msg)
@@ -114,7 +148,7 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
# template for images that run on top of OpenShift
image_url = "{}/{}-{}:{}".format(image_info["namespace"], image_info["name"], "${component}", "${version}")
image_url = self.get_var("oreg_url", default="") or image_url
- if 'nodes' in host_groups:
+ if 'oo_nodes_to_config' in host_groups:
for suffix in NODE_IMAGE_SUFFIXES:
required.add(image_url.replace("${component}", suffix).replace("${version}", image_tag))
# The registry-console is for some reason not prefixed with ose- like the other components.
@@ -125,24 +159,23 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
# images for containerized components
if self.get_var("openshift", "common", "is_containerized"):
components = set()
- if 'nodes' in host_groups:
+ if 'oo_nodes_to_config' in host_groups:
components.update(["node", "openvswitch"])
- if 'masters' in host_groups: # name is "origin" or "ose"
+ if 'oo_masters_to_config' in host_groups: # name is "origin" or "ose"
components.add(image_info["name"])
for component in components:
required.add("{}/{}:{}".format(image_info["namespace"], component, image_tag))
- if 'etcd' in host_groups: # special case, note it is the same for origin/enterprise
+ if 'oo_etcd_to_config' in host_groups: # special case, note it is the same for origin/enterprise
required.add("registry.access.redhat.com/rhel7/etcd") # and no image tag
return required
def local_images(self, images):
"""Filter a list of images and return those available locally."""
- registries = self.known_docker_registries()
found_images = []
for image in images:
# docker could have the image name as-is or prefixed with any registry
- imglist = [image] + [reg + "/" + image for reg in registries]
+ imglist = [image] + [reg + "/" + image for reg in self.registries["configured"]]
if self.is_image_local(imglist):
found_images.append(image)
return found_images
@@ -152,37 +185,27 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
result = self.execute_module("docker_image_facts", {"name": image})
return bool(result.get("images")) and not result.get("failed")
- def known_docker_registries(self):
- """Build a list of docker registries available according to inventory vars."""
- regs = self.get_var("openshift_docker_additional_registries", default=[])
+ def ensure_list(self, registry_param):
+ """Return the task var as a list."""
# https://bugzilla.redhat.com/show_bug.cgi?id=1497274
- # if the result was a string type, place it into a list. We must do this
+ # If the result was a string type, place it into a list. We must do this
# as using list() on a string will split the string into its characters.
- if isinstance(regs, six.string_types):
- regs = [regs]
- else:
- # Otherwise cast to a list as was done previously
- regs = list(regs)
+ # Otherwise cast to a list as was done previously.
+ registry = self.get_var(registry_param, default=[])
+ if not isinstance(registry, six.string_types):
+ return list(registry)
+ return self.normalize(registry)
- deployment_type = self.get_var("openshift_deployment_type")
- if deployment_type == "origin" and "docker.io" not in regs:
- regs.append("docker.io")
- elif deployment_type == 'openshift-enterprise' and "registry.access.redhat.com" not in regs:
- regs.append("registry.access.redhat.com")
-
- return regs
-
- def available_images(self, images, default_registries):
+ def available_images(self, images):
"""Search remotely for images. Returns: list of images found."""
return [
image for image in images
- if self.is_available_skopeo_image(image, default_registries)
+ if self.is_available_skopeo_image(image)
]
- def is_available_skopeo_image(self, image, default_registries):
+ def is_available_skopeo_image(self, image):
"""Use Skopeo to determine if required image exists in known registry(s)."""
- registries = default_registries
-
+ registries = self.registries["configured"]
# If image already includes a registry, only use that.
# NOTE: This logic would incorrectly identify images that do not use a namespace, e.g.
# registry.access.redhat.com/rhel7 as if the registry were a namespace.
@@ -193,13 +216,18 @@ class DockerImageAvailability(DockerHostMixin, OpenShiftCheck):
registries = [registry]
for registry in registries:
+ if registry in self.registries["blocked"]:
+ continue # blocked will never be consulted
if registry not in self.reachable_registries:
self.reachable_registries[registry] = self.connect_to_registry(registry)
if not self.reachable_registries[registry]:
- continue
+ continue # do not keep trying unreachable registries
+
+ args = dict(registry=registry, image=image)
+ args["tls"] = "false" if registry in self.registries["insecure"] else "true"
+ args["creds"] = self.skopeo_command_creds if registry == self.registries["oreg"] else ""
- args = {"_raw_params": self.skopeo_img_check_command.format(registry=registry, image=image)}
- result = self.execute_module_with_retries("command", args)
+ result = self.execute_module_with_retries("command", {"_raw_params": self.skopeo_command.format(**args)})
if result.get("rc", 0) == 0 and not result.get("failed"):
return True
if result.get("rc") == 124: # RC 124 == timed out; mark unreachable
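
The string special-case in ensure_list exists because list() iterates: applied to a registry name it yields characters, not one registry. A short demonstration:

```python
print(list("docker.io"))       # ['d', 'o', 'c', 'k', 'e', 'r', '.', 'i', 'o']
print(["docker.io"])           # ['docker.io'] -- what the check needs
print(list(("a.io", "b.io")))  # ['a.io', 'b.io'] -- non-strings cast cleanly
```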
diff --git a/roles/openshift_health_checker/openshift_checks/docker_storage.py b/roles/openshift_health_checker/openshift_checks/docker_storage.py
index 0558ddf14..6808d8b2f 100644
--- a/roles/openshift_health_checker/openshift_checks/docker_storage.py
+++ b/roles/openshift_health_checker/openshift_checks/docker_storage.py
@@ -14,7 +14,7 @@ class DockerStorage(DockerHostMixin, OpenShiftCheck):
"""
name = "docker_storage"
- tags = ["pre-install", "health", "preflight"]
+ tags = ["health", "preflight"]
dependencies = ["python-docker-py"]
storage_drivers = ["devicemapper", "overlay", "overlay2"]
diff --git a/roles/openshift_health_checker/openshift_checks/etcd_traffic.py b/roles/openshift_health_checker/openshift_checks/etcd_traffic.py
index b4c8957e9..8b20ccb49 100644
--- a/roles/openshift_health_checker/openshift_checks/etcd_traffic.py
+++ b/roles/openshift_health_checker/openshift_checks/etcd_traffic.py
@@ -12,7 +12,7 @@ class EtcdTraffic(OpenShiftCheck):
def is_active(self):
"""Skip hosts that do not have etcd in their group names."""
group_names = self.get_var("group_names", default=[])
- valid_group_names = "etcd" in group_names
+ valid_group_names = "oo_etcd_to_config" in group_names
version = self.get_major_minor_version(self.get_var("openshift_image_tag"))
valid_version = version in ((3, 4), (3, 5))
diff --git a/roles/openshift_health_checker/openshift_checks/etcd_volume.py b/roles/openshift_health_checker/openshift_checks/etcd_volume.py
index 79955cb2f..3d75da6f9 100644
--- a/roles/openshift_health_checker/openshift_checks/etcd_volume.py
+++ b/roles/openshift_health_checker/openshift_checks/etcd_volume.py
@@ -15,7 +15,11 @@ class EtcdVolume(OpenShiftCheck):
etcd_mount_path = "/var/lib/etcd"
def is_active(self):
- etcd_hosts = self.get_var("groups", "etcd", default=[]) or self.get_var("groups", "masters", default=[]) or []
+ etcd_hosts = (
+ self.get_var("groups", "oo_etcd_to_config", default=[]) or
+ self.get_var("groups", "oo_masters_to_config", default=[]) or
+ []
+ )
is_etcd_host = self.get_var("ansible_host") in etcd_hosts
return super(EtcdVolume, self).is_active() and is_etcd_host
diff --git a/roles/openshift_health_checker/openshift_checks/logging/fluentd_config.py b/roles/openshift_health_checker/openshift_checks/logging/fluentd_config.py
index d783e6760..e93cc9028 100644
--- a/roles/openshift_health_checker/openshift_checks/logging/fluentd_config.py
+++ b/roles/openshift_health_checker/openshift_checks/logging/fluentd_config.py
@@ -46,7 +46,7 @@ class FluentdConfig(LoggingCheck):
# if check is running on a master, retrieve all running pods
# and check any pod's container for the env var "USE_JOURNAL"
group_names = self.get_var("group_names")
- if "masters" in group_names:
+ if "oo_masters_to_config" in group_names:
use_journald = self.check_fluentd_env_var()
docker_info = self.execute_module("docker_info", {})
diff --git a/roles/openshift_health_checker/openshift_checks/memory_availability.py b/roles/openshift_health_checker/openshift_checks/memory_availability.py
index 765ba072d..e7a8ec976 100644
--- a/roles/openshift_health_checker/openshift_checks/memory_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/memory_availability.py
@@ -14,9 +14,9 @@ class MemoryAvailability(OpenShiftCheck):
# Values taken from the official installation documentation:
# https://docs.openshift.org/latest/install_config/install/prerequisites.html#system-requirements
recommended_memory_bytes = {
- "masters": 16 * GIB,
- "nodes": 8 * GIB,
- "etcd": 8 * GIB,
+ "oo_masters_to_config": 16 * GIB,
+ "oo_nodes_to_config": 8 * GIB,
+ "oo_etcd_to_config": 8 * GIB,
}
# https://access.redhat.com/solutions/3006511 physical RAM is partly reserved from memtotal
memtotal_adjustment = 1 * GIB
diff --git a/roles/openshift_health_checker/openshift_checks/mixins.py b/roles/openshift_health_checker/openshift_checks/mixins.py
index b90ebf6dd..cfbdea303 100644
--- a/roles/openshift_health_checker/openshift_checks/mixins.py
+++ b/roles/openshift_health_checker/openshift_checks/mixins.py
@@ -21,9 +21,11 @@ class DockerHostMixin(object):
def is_active(self):
"""Only run on hosts that depend on Docker."""
- is_containerized = self.get_var("openshift", "common", "is_containerized")
- is_node = "nodes" in self.get_var("group_names", default=[])
- return super(DockerHostMixin, self).is_active() and (is_containerized or is_node)
+ group_names = set(self.get_var("group_names", default=[]))
+ needs_docker = set(["oo_nodes_to_config"])
+        if self.get_var("openshift", "common", "is_containerized"):
+ needs_docker.update(["oo_masters_to_config", "oo_etcd_to_config"])
+ return super(DockerHostMixin, self).is_active() and bool(group_names.intersection(needs_docker))
def ensure_dependencies(self):
"""
diff --git a/roles/openshift_health_checker/openshift_checks/ovs_version.py b/roles/openshift_health_checker/openshift_checks/ovs_version.py
index 363c12def..416805c4d 100644
--- a/roles/openshift_health_checker/openshift_checks/ovs_version.py
+++ b/roles/openshift_health_checker/openshift_checks/ovs_version.py
@@ -24,7 +24,7 @@ class OvsVersion(NotContainerizedMixin, OpenShiftCheck):
def is_active(self):
"""Skip hosts that do not have package requirements."""
group_names = self.get_var("group_names", default=[])
- master_or_node = 'masters' in group_names or 'nodes' in group_names
+ master_or_node = 'oo_masters_to_config' in group_names or 'oo_nodes_to_config' in group_names
return super(OvsVersion, self).is_active() and master_or_node
def run(self):
diff --git a/roles/openshift_health_checker/openshift_checks/package_availability.py b/roles/openshift_health_checker/openshift_checks/package_availability.py
index 21355c2f0..090e438ff 100644
--- a/roles/openshift_health_checker/openshift_checks/package_availability.py
+++ b/roles/openshift_health_checker/openshift_checks/package_availability.py
@@ -20,9 +20,9 @@ class PackageAvailability(NotContainerizedMixin, OpenShiftCheck):
packages = set()
- if "masters" in group_names:
+ if "oo_masters_to_config" in group_names:
packages.update(self.master_packages(rpm_prefix))
- if "nodes" in group_names:
+ if "oo_nodes_to_config" in group_names:
packages.update(self.node_packages(rpm_prefix))
args = {"packages": sorted(set(packages))}
diff --git a/roles/openshift_health_checker/openshift_checks/package_version.py b/roles/openshift_health_checker/openshift_checks/package_version.py
index d4aec3ed8..2f09b22fc 100644
--- a/roles/openshift_health_checker/openshift_checks/package_version.py
+++ b/roles/openshift_health_checker/openshift_checks/package_version.py
@@ -36,7 +36,7 @@ class PackageVersion(NotContainerizedMixin, OpenShiftCheck):
def is_active(self):
"""Skip hosts that do not have package requirements."""
group_names = self.get_var("group_names", default=[])
- master_or_node = 'masters' in group_names or 'nodes' in group_names
+ master_or_node = 'oo_masters_to_config' in group_names or 'oo_nodes_to_config' in group_names
return super(PackageVersion, self).is_active() and master_or_node
def run(self):
diff --git a/roles/openshift_health_checker/test/action_plugin_test.py b/roles/openshift_health_checker/test/action_plugin_test.py
index f14887303..40ad27d5d 100644
--- a/roles/openshift_health_checker/test/action_plugin_test.py
+++ b/roles/openshift_health_checker/test/action_plugin_test.py
@@ -94,6 +94,7 @@ def skipped(result):
{},
])
def test_action_plugin_missing_openshift_facts(plugin, task_vars, monkeypatch):
+ monkeypatch.setattr(plugin, 'load_known_checks', lambda *_: {})
monkeypatch.setattr('openshift_health_check.resolve_checks', lambda *args: ['fake_check'])
result = plugin.run(tmp=None, task_vars=task_vars)
diff --git a/roles/openshift_health_checker/test/disk_availability_test.py b/roles/openshift_health_checker/test/disk_availability_test.py
index 9ae679b79..7acdb40ec 100644
--- a/roles/openshift_health_checker/test/disk_availability_test.py
+++ b/roles/openshift_health_checker/test/disk_availability_test.py
@@ -4,11 +4,11 @@ from openshift_checks.disk_availability import DiskAvailability, OpenShiftCheckE
@pytest.mark.parametrize('group_names,is_active', [
- (['masters'], True),
- (['nodes'], True),
- (['etcd'], True),
- (['masters', 'nodes'], True),
- (['masters', 'etcd'], True),
+ (['oo_masters_to_config'], True),
+ (['oo_nodes_to_config'], True),
+ (['oo_etcd_to_config'], True),
+ (['oo_masters_to_config', 'oo_nodes_to_config'], True),
+ (['oo_masters_to_config', 'oo_etcd_to_config'], True),
([], False),
(['lb'], False),
(['nfs'], False),
@@ -39,7 +39,7 @@ def test_is_active(group_names, is_active):
])
def test_cannot_determine_available_disk(desc, ansible_mounts, expect_chunks):
task_vars = dict(
- group_names=['masters'],
+ group_names=['oo_masters_to_config'],
ansible_mounts=ansible_mounts,
)
@@ -52,7 +52,7 @@ def test_cannot_determine_available_disk(desc, ansible_mounts, expect_chunks):
@pytest.mark.parametrize('group_names,configured_min,ansible_mounts', [
(
- ['masters'],
+ ['oo_masters_to_config'],
0,
[{
'mount': '/',
@@ -60,7 +60,7 @@ def test_cannot_determine_available_disk(desc, ansible_mounts, expect_chunks):
}],
),
(
- ['nodes'],
+ ['oo_nodes_to_config'],
0,
[{
'mount': '/',
@@ -68,7 +68,7 @@ def test_cannot_determine_available_disk(desc, ansible_mounts, expect_chunks):
}],
),
(
- ['etcd'],
+ ['oo_etcd_to_config'],
0,
[{
'mount': '/',
@@ -76,7 +76,7 @@ def test_cannot_determine_available_disk(desc, ansible_mounts, expect_chunks):
}],
),
(
- ['etcd'],
+ ['oo_etcd_to_config'],
1, # configure lower threshold
[{
'mount': '/',
@@ -84,7 +84,7 @@ def test_cannot_determine_available_disk(desc, ansible_mounts, expect_chunks):
}],
),
(
- ['etcd'],
+ ['oo_etcd_to_config'],
0,
[{
# not enough space on / ...
@@ -96,6 +96,24 @@ def test_cannot_determine_available_disk(desc, ansible_mounts, expect_chunks):
'size_available': 20 * 10**9 + 1,
}],
),
+ (
+ ['oo_masters_to_config'],
+ 0,
+ [{
+ 'mount': '/',
+ 'size_available': 2 * 10**9,
+ }, { # not enough directly on /var
+ 'mount': '/var',
+ 'size_available': 10 * 10**9 + 1,
+ }, {
+ # but subdir mounts add up to enough
+ 'mount': '/var/lib/docker',
+ 'size_available': 20 * 10**9 + 1,
+ }, {
+ 'mount': '/var/lib/origin',
+ 'size_available': 20 * 10**9 + 1,
+ }],
+ ),
])
def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansible_mounts):
task_vars = dict(
@@ -104,15 +122,16 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib
ansible_mounts=ansible_mounts,
)
- result = DiskAvailability(fake_execute_module, task_vars).run()
+ check = DiskAvailability(fake_execute_module, task_vars)
+ check.run()
- assert not result.get('failed', False)
+ assert not check.failures
@pytest.mark.parametrize('name,group_names,configured_min,ansible_mounts,expect_chunks', [
(
'test with no space available',
- ['masters'],
+ ['oo_masters_to_config'],
0,
[{
'mount': '/',
@@ -122,7 +141,7 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib
),
(
'test with a higher configured required value',
- ['masters'],
+ ['oo_masters_to_config'],
100, # set a higher threshold
[{
'mount': '/',
@@ -132,7 +151,7 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib
),
(
'test with 1GB available, but "0" GB space requirement',
- ['nodes'],
+ ['oo_nodes_to_config'],
0,
[{
'mount': '/',
@@ -142,7 +161,7 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib
),
(
'test with no space available, but "0" GB space requirement',
- ['etcd'],
+ ['oo_etcd_to_config'],
0,
[{
'mount': '/',
@@ -152,7 +171,7 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib
),
(
'test with enough space for a node, but not for a master',
- ['nodes', 'masters'],
+ ['oo_nodes_to_config', 'oo_masters_to_config'],
0,
[{
'mount': '/',
@@ -162,7 +181,7 @@ def test_succeeds_with_recommended_disk_space(group_names, configured_min, ansib
),
(
'test failure with enough space on "/", but not enough on "/var"',
- ['etcd'],
+ ['oo_etcd_to_config'],
0,
[{
# enough space on / ...
@@ -194,7 +213,7 @@ def test_fails_with_insufficient_disk_space(name, group_names, configured_min, a
@pytest.mark.parametrize('name,group_names,context,ansible_mounts,failed,extra_words', [
(
'test without enough space for master under "upgrade" context',
- ['nodes', 'masters'],
+ ['oo_nodes_to_config', 'oo_masters_to_config'],
"upgrade",
[{
'mount': '/',
@@ -206,7 +225,7 @@ def test_fails_with_insufficient_disk_space(name, group_names, configured_min, a
),
(
'test with enough space for master under "upgrade" context',
- ['nodes', 'masters'],
+ ['oo_nodes_to_config', 'oo_masters_to_config'],
"upgrade",
[{
'mount': '/',
@@ -218,7 +237,7 @@ def test_fails_with_insufficient_disk_space(name, group_names, configured_min, a
),
(
'test with not enough space for master, and non-upgrade context',
- ['nodes', 'masters'],
+ ['oo_nodes_to_config', 'oo_masters_to_config'],
"health",
[{
'mount': '/',
diff --git a/roles/openshift_health_checker/test/docker_image_availability_test.py b/roles/openshift_health_checker/test/docker_image_availability_test.py
index c523ffd5c..dec99e5db 100644
--- a/roles/openshift_health_checker/test/docker_image_availability_test.py
+++ b/roles/openshift_health_checker/test/docker_image_availability_test.py
@@ -16,19 +16,19 @@ def task_vars():
),
openshift_deployment_type='origin',
openshift_image_tag='',
- group_names=['nodes', 'masters'],
+ group_names=['oo_nodes_to_config', 'oo_masters_to_config'],
)
@pytest.mark.parametrize('deployment_type, is_containerized, group_names, expect_active', [
- ("origin", True, [], True),
- ("openshift-enterprise", True, [], True),
("invalid", True, [], False),
("", True, [], False),
("origin", False, [], False),
("openshift-enterprise", False, [], False),
- ("origin", False, ["nodes", "masters"], True),
- ("openshift-enterprise", False, ["etcd"], False),
+ ("origin", False, ["oo_nodes_to_config", "oo_masters_to_config"], True),
+ ("openshift-enterprise", False, ["oo_etcd_to_config"], False),
+ ("origin", True, ["nfs"], False),
+ ("openshift-enterprise", True, ["lb"], False),
])
def test_is_active(task_vars, deployment_type, is_containerized, group_names, expect_active):
task_vars['openshift_deployment_type'] = deployment_type
@@ -98,40 +98,7 @@ def test_all_images_unavailable(task_vars):
actual = check.run()
assert actual['failed']
- assert "required Docker images are not available" in actual['msg']
-
-
-def test_no_known_registries():
- def execute_module(module_name=None, *_):
- if module_name == "command":
- return {
- 'failed': True,
- }
-
- return {
- 'changed': False,
- }
-
- def mock_known_docker_registries():
- return []
-
- dia = DockerImageAvailability(execute_module, task_vars=dict(
- openshift=dict(
- common=dict(
- service_type='origin',
- is_containerized=False,
- is_atomic=False,
- )
- ),
- openshift_docker_additional_registries=["docker.io"],
- openshift_deployment_type="openshift-enterprise",
- openshift_image_tag='latest',
- group_names=['nodes', 'masters'],
- ))
- dia.known_docker_registries = mock_known_docker_registries
- actual = dia.run()
- assert actual['failed']
- assert "Unable to retrieve any docker registries." in actual['msg']
+ assert "required container images are not available" in actual['msg']
@pytest.mark.parametrize("message,extra_words", [
@@ -172,13 +139,13 @@ def test_skopeo_update_failure(task_vars, message, extra_words):
"spam/eggs:v1", ["test.reg"],
True, True,
False,
- {"test.reg": False},
+ {"test.reg": False, "docker.io": False},
),
(
"spam/eggs:v1", ["test.reg"],
False, True,
False,
- {"test.reg": True},
+ {"test.reg": True, "docker.io": True},
),
(
"eggs.reg/spam/eggs:v1", ["test.reg"],
@@ -195,17 +162,19 @@ def test_registry_availability(image, registries, connection_test_failed, skopeo
elif module_name == "command":
return dict(msg="msg", failed=skopeo_failed)
- check = DockerImageAvailability(execute_module, task_vars())
+ tv = task_vars()
+ tv.update({"openshift_docker_additional_registries": registries})
+ check = DockerImageAvailability(execute_module, tv)
check._module_retry_interval = 0
- available = check.is_available_skopeo_image(image, registries)
+ available = check.is_available_skopeo_image(image)
assert available == expect_success
assert expect_registries_reached == check.reachable_registries
@pytest.mark.parametrize("deployment_type, is_containerized, groups, oreg_url, expected", [
( # standard set of stuff required on nodes
- "origin", False, ['nodes'], None,
+ "origin", False, ['oo_nodes_to_config'], "",
set([
'openshift/origin-pod:vtest',
'openshift/origin-deployer:vtest',
@@ -215,7 +184,7 @@ def test_registry_availability(image, registries, connection_test_failed, skopeo
])
),
( # set a different URL for images
- "origin", False, ['nodes'], 'foo.io/openshift/origin-${component}:${version}',
+ "origin", False, ['oo_nodes_to_config'], 'foo.io/openshift/origin-${component}:${version}',
set([
'foo.io/openshift/origin-pod:vtest',
'foo.io/openshift/origin-deployer:vtest',
@@ -225,7 +194,7 @@ def test_registry_availability(image, registries, connection_test_failed, skopeo
])
),
(
- "origin", True, ['nodes', 'masters', 'etcd'], None,
+ "origin", True, ['oo_nodes_to_config', 'oo_masters_to_config', 'oo_etcd_to_config'], "",
set([
# images running on top of openshift
'openshift/origin-pod:vtest',
@@ -241,7 +210,7 @@ def test_registry_availability(image, registries, connection_test_failed, skopeo
])
),
( # enterprise images
- "openshift-enterprise", True, ['nodes'], 'foo.io/openshift3/ose-${component}:f13ac45',
+ "openshift-enterprise", True, ['oo_nodes_to_config'], 'foo.io/openshift3/ose-${component}:f13ac45',
set([
'foo.io/openshift3/ose-pod:f13ac45',
'foo.io/openshift3/ose-deployer:f13ac45',
@@ -255,7 +224,7 @@ def test_registry_availability(image, registries, connection_test_failed, skopeo
])
),
(
- "openshift-enterprise", True, ['etcd', 'lb'], 'foo.io/openshift3/ose-${component}:f13ac45',
+ "openshift-enterprise", True, ['oo_etcd_to_config', 'lb'], 'foo.io/openshift3/ose-${component}:f13ac45',
set([
'registry.access.redhat.com/rhel7/etcd',
# lb does not yet come in a containerized version
@@ -288,7 +257,7 @@ def test_containerized_etcd():
),
),
openshift_deployment_type="origin",
- group_names=['etcd'],
+ group_names=['oo_etcd_to_config'],
)
expected = set(['registry.access.redhat.com/rhel7/etcd'])
assert expected == DockerImageAvailability(task_vars=task_vars).required_images()
diff --git a/roles/openshift_health_checker/test/docker_storage_test.py b/roles/openshift_health_checker/test/docker_storage_test.py
index e0dccc062..8fa68c378 100644
--- a/roles/openshift_health_checker/test/docker_storage_test.py
+++ b/roles/openshift_health_checker/test/docker_storage_test.py
@@ -5,9 +5,9 @@ from openshift_checks.docker_storage import DockerStorage
@pytest.mark.parametrize('is_containerized, group_names, is_active', [
- (False, ["masters", "etcd"], False),
- (False, ["masters", "nodes"], True),
- (True, ["etcd"], True),
+ (False, ["oo_masters_to_config", "oo_etcd_to_config"], False),
+ (False, ["oo_masters_to_config", "oo_nodes_to_config"], True),
+ (True, ["oo_etcd_to_config"], True),
])
def test_is_active(is_containerized, group_names, is_active):
task_vars = dict(
diff --git a/roles/openshift_health_checker/test/etcd_traffic_test.py b/roles/openshift_health_checker/test/etcd_traffic_test.py
index fae3e578d..dd6f4ad81 100644
--- a/roles/openshift_health_checker/test/etcd_traffic_test.py
+++ b/roles/openshift_health_checker/test/etcd_traffic_test.py
@@ -4,14 +4,14 @@ from openshift_checks.etcd_traffic import EtcdTraffic
@pytest.mark.parametrize('group_names,version,is_active', [
- (['masters'], "3.5", False),
- (['masters'], "3.6", False),
- (['nodes'], "3.4", False),
- (['etcd'], "3.4", True),
- (['etcd'], "1.5", True),
- (['etcd'], "3.1", False),
- (['masters', 'nodes'], "3.5", False),
- (['masters', 'etcd'], "3.5", True),
+ (['oo_masters_to_config'], "3.5", False),
+ (['oo_masters_to_config'], "3.6", False),
+ (['oo_nodes_to_config'], "3.4", False),
+ (['oo_etcd_to_config'], "3.4", True),
+ (['oo_etcd_to_config'], "1.5", True),
+ (['oo_etcd_to_config'], "3.1", False),
+ (['oo_masters_to_config', 'oo_nodes_to_config'], "3.5", False),
+ (['oo_masters_to_config', 'oo_etcd_to_config'], "3.5", True),
([], "3.4", False),
])
def test_is_active(group_names, version, is_active):
@@ -23,9 +23,9 @@ def test_is_active(group_names, version, is_active):
@pytest.mark.parametrize('group_names,matched,failed,extra_words', [
- (["masters"], True, True, ["Higher than normal", "traffic"]),
- (["masters", "etcd"], False, False, []),
- (["etcd"], False, False, []),
+ (["oo_masters_to_config"], True, True, ["Higher than normal", "traffic"]),
+ (["oo_masters_to_config", "oo_etcd_to_config"], False, False, []),
+ (["oo_etcd_to_config"], False, False, []),
])
def test_log_matches_high_traffic_msg(group_names, matched, failed, extra_words):
def execute_module(module_name, *_):
diff --git a/roles/openshift_health_checker/test/fluentd_config_test.py b/roles/openshift_health_checker/test/fluentd_config_test.py
index 10db253bc..b5b4858d6 100644
--- a/roles/openshift_health_checker/test/fluentd_config_test.py
+++ b/roles/openshift_health_checker/test/fluentd_config_test.py
@@ -82,7 +82,7 @@ def test_check_logging_config_non_master(name, use_journald, logging_driver, ext
return {}
task_vars = dict(
- group_names=["nodes", "etcd"],
+ group_names=["oo_nodes_to_config", "oo_etcd_to_config"],
openshift_logging_fluentd_use_journal=use_journald,
openshift=dict(
common=dict(config_base=""),
@@ -128,7 +128,7 @@ def test_check_logging_config_non_master_failed(name, use_journald, logging_driv
return {}
task_vars = dict(
- group_names=["nodes", "etcd"],
+ group_names=["oo_nodes_to_config", "oo_etcd_to_config"],
openshift_logging_fluentd_use_journal=use_journald,
openshift=dict(
common=dict(config_base=""),
@@ -192,7 +192,7 @@ def test_check_logging_config_master(name, pods, logging_driver, extra_words):
return {}
task_vars = dict(
- group_names=["masters"],
+ group_names=["oo_masters_to_config"],
openshift=dict(
common=dict(config_base=""),
),
@@ -274,7 +274,7 @@ def test_check_logging_config_master_failed(name, pods, logging_driver, words):
return {}
task_vars = dict(
- group_names=["masters"],
+ group_names=["oo_masters_to_config"],
openshift=dict(
common=dict(config_base=""),
),
@@ -331,7 +331,7 @@ def test_check_logging_config_master_fails_on_unscheduled_deployment(name, pods,
return {}
task_vars = dict(
- group_names=["masters"],
+ group_names=["oo_masters_to_config"],
openshift=dict(
common=dict(config_base=""),
),
diff --git a/roles/openshift_health_checker/test/memory_availability_test.py b/roles/openshift_health_checker/test/memory_availability_test.py
index aee2f0416..5ec83dd79 100644
--- a/roles/openshift_health_checker/test/memory_availability_test.py
+++ b/roles/openshift_health_checker/test/memory_availability_test.py
@@ -4,11 +4,11 @@ from openshift_checks.memory_availability import MemoryAvailability
@pytest.mark.parametrize('group_names,is_active', [
- (['masters'], True),
- (['nodes'], True),
- (['etcd'], True),
- (['masters', 'nodes'], True),
- (['masters', 'etcd'], True),
+ (['oo_masters_to_config'], True),
+ (['oo_nodes_to_config'], True),
+ (['oo_etcd_to_config'], True),
+ (['oo_masters_to_config', 'oo_nodes_to_config'], True),
+ (['oo_masters_to_config', 'oo_etcd_to_config'], True),
([], False),
(['lb'], False),
(['nfs'], False),
@@ -22,32 +22,32 @@ def test_is_active(group_names, is_active):
@pytest.mark.parametrize('group_names,configured_min,ansible_memtotal_mb', [
(
- ['masters'],
+ ['oo_masters_to_config'],
0,
17200,
),
(
- ['nodes'],
+ ['oo_nodes_to_config'],
0,
8200,
),
(
- ['nodes'],
+ ['oo_nodes_to_config'],
1, # configure lower threshold
2000, # too low for recommended but not for configured
),
(
- ['nodes'],
+ ['oo_nodes_to_config'],
2, # configure threshold where adjustment pushes it over
1900,
),
(
- ['etcd'],
+ ['oo_etcd_to_config'],
0,
8200,
),
(
- ['masters', 'nodes'],
+ ['oo_masters_to_config', 'oo_nodes_to_config'],
0,
17000,
),
@@ -66,43 +66,43 @@ def test_succeeds_with_recommended_memory(group_names, configured_min, ansible_m
@pytest.mark.parametrize('group_names,configured_min,ansible_memtotal_mb,extra_words', [
(
- ['masters'],
+ ['oo_masters_to_config'],
0,
0,
['0.0 GiB'],
),
(
- ['nodes'],
+ ['oo_nodes_to_config'],
0,
100,
['0.1 GiB'],
),
(
- ['nodes'],
+ ['oo_nodes_to_config'],
24, # configure higher threshold
20 * 1024, # enough to meet recommended but not configured
['20.0 GiB'],
),
(
- ['nodes'],
+ ['oo_nodes_to_config'],
24, # configure higher threshold
22 * 1024, # not enough for adjustment to push over threshold
['22.0 GiB'],
),
(
- ['etcd'],
+ ['oo_etcd_to_config'],
0,
6 * 1024,
['6.0 GiB'],
),
(
- ['etcd', 'masters'],
+ ['oo_etcd_to_config', 'oo_masters_to_config'],
0,
9 * 1024, # enough memory for etcd, not enough for a master
['9.0 GiB'],
),
(
- ['nodes', 'masters'],
+ ['oo_nodes_to_config', 'oo_masters_to_config'],
0,
# enough memory for a node, not enough for a master
11 * 1024,
diff --git a/roles/openshift_health_checker/test/ovs_version_test.py b/roles/openshift_health_checker/test/ovs_version_test.py
index 602f32989..5a82a43bf 100644
--- a/roles/openshift_health_checker/test/ovs_version_test.py
+++ b/roles/openshift_health_checker/test/ovs_version_test.py
@@ -67,14 +67,14 @@ def test_ovs_package_version(openshift_release, expected_ovs_version):
@pytest.mark.parametrize('group_names,is_containerized,is_active', [
- (['masters'], False, True),
+ (['oo_masters_to_config'], False, True),
# ensure check is skipped on containerized installs
- (['masters'], True, False),
- (['nodes'], False, True),
- (['masters', 'nodes'], False, True),
- (['masters', 'etcd'], False, True),
+ (['oo_masters_to_config'], True, False),
+ (['oo_nodes_to_config'], False, True),
+ (['oo_masters_to_config', 'oo_nodes_to_config'], False, True),
+ (['oo_masters_to_config', 'oo_etcd_to_config'], False, True),
([], False, False),
- (['etcd'], False, False),
+ (['oo_etcd_to_config'], False, False),
(['lb'], False, False),
(['nfs'], False, False),
])
diff --git a/roles/openshift_health_checker/test/package_availability_test.py b/roles/openshift_health_checker/test/package_availability_test.py
index b34e8fbfc..9815acb38 100644
--- a/roles/openshift_health_checker/test/package_availability_test.py
+++ b/roles/openshift_health_checker/test/package_availability_test.py
@@ -26,7 +26,7 @@ def test_is_active(pkg_mgr, is_containerized, is_active):
(
dict(
openshift=dict(common=dict(service_type='origin')),
- group_names=['masters'],
+ group_names=['oo_masters_to_config'],
),
set(['origin-master']),
set(['origin-node']),
@@ -34,7 +34,7 @@ def test_is_active(pkg_mgr, is_containerized, is_active):
(
dict(
openshift=dict(common=dict(service_type='atomic-openshift')),
- group_names=['nodes'],
+ group_names=['oo_nodes_to_config'],
),
set(['atomic-openshift-node']),
set(['atomic-openshift-master']),
@@ -42,7 +42,7 @@ def test_is_active(pkg_mgr, is_containerized, is_active):
(
dict(
openshift=dict(common=dict(service_type='atomic-openshift')),
- group_names=['masters', 'nodes'],
+ group_names=['oo_masters_to_config', 'oo_nodes_to_config'],
),
set(['atomic-openshift-master', 'atomic-openshift-node']),
set(),
diff --git a/roles/openshift_health_checker/test/package_version_test.py b/roles/openshift_health_checker/test/package_version_test.py
index 8564cd4db..3cf4ce033 100644
--- a/roles/openshift_health_checker/test/package_version_test.py
+++ b/roles/openshift_health_checker/test/package_version_test.py
@@ -97,14 +97,14 @@ def test_docker_package_version(deployment_type, openshift_release, expected_doc
@pytest.mark.parametrize('group_names,is_containerized,is_active', [
- (['masters'], False, True),
+ (['oo_masters_to_config'], False, True),
# ensure check is skipped on containerized installs
- (['masters'], True, False),
- (['nodes'], False, True),
- (['masters', 'nodes'], False, True),
- (['masters', 'etcd'], False, True),
+ (['oo_masters_to_config'], True, False),
+ (['oo_nodes_to_config'], False, True),
+ (['oo_masters_to_config', 'oo_nodes_to_config'], False, True),
+ (['oo_masters_to_config', 'oo_etcd_to_config'], False, True),
([], False, False),
- (['etcd'], False, False),
+ (['oo_etcd_to_config'], False, False),
(['lb'], False, False),
(['nfs'], False, False),
])
diff --git a/roles/openshift_hosted/defaults/main.yml b/roles/openshift_hosted/defaults/main.yml
index c234c3740..589ad3f51 100644
--- a/roles/openshift_hosted/defaults/main.yml
+++ b/roles/openshift_hosted/defaults/main.yml
@@ -69,7 +69,6 @@ r_openshift_hosted_registry_use_firewalld: "{{ os_firewall_use_firewalld | defau
openshift_hosted_registry_name: docker-registry
openshift_hosted_registry_wait: "{{ not (openshift_master_bootstrap_enabled | default(False)) }}"
-registry_volume_claim: 'registry-claim'
openshift_hosted_registry_cert_expire_days: 730
r_openshift_hosted_registry_os_firewall_deny: []
diff --git a/roles/openshift_hosted/templates/registry_config.j2 b/roles/openshift_hosted/templates/registry_config.j2
index eae8b328e..222b63b8a 100644
--- a/roles/openshift_hosted/templates/registry_config.j2
+++ b/roles/openshift_hosted/templates/registry_config.j2
@@ -53,7 +53,7 @@ storage:
{% if openshift_hosted_registry_storage_swift_domain is defined %}
domain: {{ openshift_hosted_registry_storage_swift_domain }}
{% endif -%}
-{% if openshift_hosted_registry_storage_swift_domainid %}
+{% if openshift_hosted_registry_storage_swift_domainid is defined %}
domainid: {{ openshift_hosted_registry_storage_swift_domainid }}
{% endif -%}
{% elif openshift_hosted_registry_storage_provider | default('') == 'gcs' %}
@@ -63,7 +63,7 @@ storage:
keyfile: /etc/registry/gcs.json
{% endif -%}
{% if openshift_hosted_registry_storage_gcs_rootdirectory is defined %}
- rootdirectory: {{ openshift_hosted_registry_storage_gcs_rootdirectory }}
+ rootdirectory: {{ openshift_hosted_registry_storage_gcs_rootdirectory | default('/registry') }}
{% endif -%}
{% endif -%}
auth:
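
The swift_domainid fix matters because Ansible renders templates with StrictUndefined: a bare `{% if var %}` raises on an undefined variable instead of evaluating false, so the guard must be `is defined`. A small reproduction (assumes the jinja2 package):

```python
from jinja2 import Environment, StrictUndefined

env = Environment(undefined=StrictUndefined)

safe = env.from_string("{% if x is defined %}domainid: {{ x }}{% endif %}")
print(repr(safe.render()))       # '' -- undefined var, no error
print(safe.render(x="default"))  # domainid: default

bad = env.from_string("{% if x %}domainid: {{ x }}{% endif %}")
try:
    bad.render()                 # boolean test of StrictUndefined raises
except Exception as exc:
    print(type(exc).__name__)    # UndefinedError
```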
diff --git a/roles/openshift_hosted_facts/tasks/main.yml b/roles/openshift_hosted_facts/tasks/main.yml
index 47dc9171d..8fc70cecb 100644
--- a/roles/openshift_hosted_facts/tasks/main.yml
+++ b/roles/openshift_hosted_facts/tasks/main.yml
@@ -16,4 +16,4 @@
| oo_openshift_env }}"
openshift_env_structures:
- 'openshift.hosted.router.*'
- with_items: [hosted, logging, loggingops, metrics]
+ with_items: [hosted, logging, loggingops, metrics, prometheus]
diff --git a/roles/openshift_logging/README.md b/roles/openshift_logging/README.md
index 829c78728..6c5bb8693 100644
--- a/roles/openshift_logging/README.md
+++ b/roles/openshift_logging/README.md
@@ -44,23 +44,23 @@ When `openshift_logging_install_logging` is set to `False` the `openshift_loggin
- `openshift_logging_curator_run_timezone`: The timezone that Curator uses for figuring out its run time. Defaults to 'UTC'.
- `openshift_logging_curator_script_log_level`: The script log level for Curator. Defaults to 'INFO'.
- `openshift_logging_curator_log_level`: The log level for the Curator process. Defaults to 'ERROR'.
-- `openshift_logging_curator_cpu_limit`: The amount of CPU to allocate to Curator. Default is '100m'.
+- `openshift_logging_curator_cpu_request`: The minimum amount of CPU to allocate to Curator. Default is '100m'.
- `openshift_logging_curator_memory_limit`: The amount of memory to allocate to Curator. Unset if not specified.
- `openshift_logging_curator_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"}) to select the nodes where the curator pod will land.
- `openshift_logging_image_pull_secret`: The name of an existing pull secret to link to the logging service accounts
- `openshift_logging_kibana_hostname`: The Kibana hostname. Defaults to 'kibana.example.com'.
-- `openshift_logging_kibana_cpu_limit`: The amount of CPU to allocate to Kibana or unset if not specified.
+- `openshift_logging_kibana_cpu_request`: The minimum amount of CPU to allocate to Kibana or unset if not specified.
- `openshift_logging_kibana_memory_limit`: The amount of memory to allocate to Kibana or unset if not specified.
- `openshift_logging_kibana_proxy_debug`: When "True", set the Kibana Proxy log level to DEBUG. Defaults to 'false'.
-- `openshift_logging_kibana_proxy_cpu_limit`: The amount of CPU to allocate to Kibana proxy or unset if not specified.
+- `openshift_logging_kibana_proxy_cpu_request`: The minimum amount of CPU to allocate to Kibana proxy or unset if not specified.
- `openshift_logging_kibana_proxy_memory_limit`: The amount of memory to allocate to Kibana proxy or unset if not specified.
- `openshift_logging_kibana_replica_count`: The number of replicas Kibana should be scaled up to. Defaults to 1.
- `openshift_logging_kibana_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"}) to select the nodes where the pod will land.
- `openshift_logging_kibana_edge_term_policy`: Insecure Edge Termination Policy. Defaults to Redirect.
- `openshift_logging_fluentd_nodeselector`: The node selector that the Fluentd daemonset uses to determine where to deploy to. Defaults to '"logging-infra-fluentd": "true"'.
-- `openshift_logging_fluentd_cpu_limit`: The CPU limit for Fluentd pods. Defaults to '100m'.
+- `openshift_logging_fluentd_cpu_request`: The minimum amount of CPU to allocate for Fluentd collector pods. Defaults to '100m'.
- `openshift_logging_fluentd_memory_limit`: The memory limit for Fluentd pods. Defaults to '512Mi'.
- `openshift_logging_fluentd_use_journal`: *DEPRECATED - DO NOT USE* Fluentd will automatically detect whether or not Docker is using the journald log driver.
- `openshift_logging_fluentd_journal_read_from_head`: If empty, Fluentd will use its internal default, which is false.
@@ -69,6 +69,9 @@ When `openshift_logging_install_logging` is set to `False` the `openshift_loggin
- `openshift_logging_fluentd_buffer_size_limit`: Buffer chunk limit for Fluentd. Defaults to 1m.
- `openshift_logging_fluentd_file_buffer_limit`: Fluentd will set the value to the file buffer limit. Defaults to '1Gi' per destination.
+- `openshift_logging_fluentd_audit_container_engine`: When set to `True`, the audit log of the container engine will be collected and stored in ES.
+- `openshift_logging_fluentd_audit_file`: Location of the audit log file. The default is `/var/log/audit/audit.log`.
+- `openshift_logging_fluentd_audit_pos_file`: Location of the Fluentd in_tail position file for the audit log file. The default is `/var/log/audit/audit.log.pos`.
- `openshift_logging_es_host`: The name of the ES service Fluentd should send logs to. Defaults to 'logging-es'.
- `openshift_logging_es_port`: The port for the ES service Fluentd should send its logs to. Defaults to '9200'.
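+
+As an illustrative sketch (the paths shown are simply the defaults listed above), enabling container-engine audit log collection in your inventory variables might look like:
+```
+openshift_logging_fluentd_audit_container_engine: True
+openshift_logging_fluentd_audit_file: /var/log/audit/audit.log
+openshift_logging_fluentd_audit_pos_file: /var/log/audit/audit.log.pos
+```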
@@ -77,7 +80,7 @@ When `openshift_logging_install_logging` is set to `False` the `openshift_loggin
- `openshift_logging_es_client_key`: The location of the client key Fluentd uses for openshift_logging_es_host. Defaults to '/etc/fluent/keys/key'.
- `openshift_logging_es_cluster_size`: The number of ES cluster members. Defaults to '1'.
-- `openshift_logging_es_cpu_limit`: The amount of CPU limit for the ES cluster. Unused if not set
+- `openshift_logging_es_cpu_request`: The minimum amount of CPU to allocate for an ES pod cluster member. Defaults to 1 CPU.
- `openshift_logging_es_memory_limit`: The amount of RAM that should be assigned to ES. Defaults to '8Gi'.
- `openshift_logging_es_log_appenders`: The list of rootLogger appenders for ES logs which can be: 'file', 'console'. Defaults to 'file'.
- `openshift_logging_es_pv_selector`: A key/value map added to a PVC in order to select specific PVs. Defaults to 'None'.
@@ -104,7 +107,7 @@ same as above for their non-ops counterparts, but apply to the OPS cluster insta
- `openshift_logging_es_ops_client_cert`: /etc/fluent/keys/cert
- `openshift_logging_es_ops_client_key`: /etc/fluent/keys/key
- `openshift_logging_es_ops_cluster_size`: 1
-- `openshift_logging_es_ops_cpu_limit`: The amount of CPU limit for the ES cluster. Unused if not set
+- `openshift_logging_es_ops_cpu_request`: The minimum amount of CPU to allocate for an ES ops pod cluster member. Defaults to 1 CPU.
- `openshift_logging_es_ops_memory_limit`: 8Gi
- `openshift_logging_es_ops_pvc_dynamic`: False
- `openshift_logging_es_ops_pvc_size`: ""
@@ -112,9 +115,9 @@ same as above for their non-ops counterparts, but apply to the OPS cluster insta
- `openshift_logging_es_ops_recover_after_time`: 5m
- `openshift_logging_es_ops_storage_group`: 65534
- `openshift_logging_kibana_ops_hostname`: The Operations Kibana hostname. Defaults to 'kibana-ops.example.com'.
-- `openshift_logging_kibana_ops_cpu_limit`: The amount of CPU to allocate to Kibana or unset if not specified.
+- `openshift_logging_kibana_ops_cpu_request`: The minimum amount of CPU to allocate to Kibana or unset if not specified.
- `openshift_logging_kibana_ops_memory_limit`: The amount of memory to allocate to Kibana or unset if not specified.
-- `openshift_logging_kibana_ops_proxy_cpu_limit`: The amount of CPU to allocate to Kibana proxy or unset if not specified.
+- `openshift_logging_kibana_ops_proxy_cpu_request`: The minimum amount of CPU to allocate to Kibana proxy or unset if not specified.
- `openshift_logging_kibana_ops_proxy_memory_limit`: The amount of memory to allocate to Kibana proxy or unset if not specified.
- `openshift_logging_kibana_ops_replica_count`: The number of replicas Kibana ops should be scaled up to. Defaults to 1.
@@ -173,7 +176,7 @@ Elasticsearch OPS too, if using an OPS cluster:
clients will use to connect to mux, and will be used in the TLS server cert
subject.
- `openshift_logging_mux_port`: 24284
-- `openshift_logging_mux_cpu_limit`: 100m
+- `openshift_logging_mux_cpu_request`: 100m
- `openshift_logging_mux_memory_limit`: 512Mi
- `openshift_logging_mux_default_namespaces`: Default `["mux-undefined"]` - the
first value in the list is the namespace to use for undefined projects,
@@ -222,3 +225,80 @@ The corresponding openshift\_logging\_mux\_* parameters are below.
- `openshift_logging_mux_remote_syslog_tag_key`: If a string is specified, use this field from the record to set the key field on the syslog message
- `openshift_logging_mux_remote_syslog_use_record`: Set `true` to use the severity and facility from the record, defaults to `false`
- `openshift_logging_mux_remote_syslog_payload_key`: If a string is specified, use this field from the record as the payload on the syslog message
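+
+As a sketch only (the host and port values are placeholders, and the `openshift_logging_mux_remote_syslog`, `_host`, and `_port` parameters are assumed to mirror their fluentd counterparts documented above this excerpt), forwarding mux output to an external syslog receiver might look like:
+```
+openshift_logging_mux_remote_syslog: true
+openshift_logging_mux_remote_syslog_host: syslog.example.com
+openshift_logging_mux_remote_syslog_port: 514
+openshift_logging_mux_remote_syslog_use_record: true
+```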
+
+Image update procedure
+----------------------
+An upgrade of the logging stack from an older version to a newer one is an automated process; perform it by running the appropriate Ansible playbook with the required Ansible variables set in your inventory, as documented at https://docs.openshift.org/.
+
+The following text describes a manual update of the logging images without a version upgrade. To determine the current version of the images in use, run:
+```
+oc describe pod | grep 'Image ID:'
+```
+This returns the repo digest, which can later be compared against the inspected image details.
+
+To determine when your image was last updated:
+```
+$ docker images
+REPOSITORY TAG IMAGE ID CREATED SIZE
+<registry>/openshift3/logging-fluentd v3.7 ff2e249fc45a About an hour ago 235.2 MB
+
+$ docker inspect ff2e249fc45a
+[
+ {
+ . . .
+ "RepoDigests": [
+ "<registry>/openshift3/logging-fluentd@sha256:4346f0aa9694f32735115705ad324803b1a6ff08343c3288f7a62c3a5cb70495"
+ ],
+ . . .
+ "Config": {
+ . . .
+ "Labels": {
+ . . .
+ "build-date": "2017-10-12T14:38:22.414827",
+ . . .
+ "release": "0.143.3.0",
+ . . .
+ "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/openshift3/logging-fluentd/images/v3.7.0-0.143.3.0",
+ . . .
+ "version": "v3.7.0"
+ }
+ },
+ . . .
+```
+
+Pull the image again to see whether the registry has a newer image with the same tag:
+```
+$ docker pull <registry>/openshift3/logging-fluentd:v3.7
+```
+
+If there was an update, you need to run `docker pull` on each node.
+
+It is recommended that you now rerun the `openshift_logging` playbook to ensure that any necessary config changes are also picked up.
+
+To manually redeploy your pods, you can do the following:
+- for a DC, run:
+```
+oc rollout latest <dc_name>
+```
+
+- for an RC, scale down and then scale back up:
+```
+oc scale --replicas=0 rc/<rc_name>
+
+... wait for the scale down to complete
+
+oc scale --replicas=<original_replica_count> rc/<rc_name>
+```
+
+- for a DS, delete the pods, or unlabel and relabel your node:
+```
+oc delete pod --selector=<ds_selector>
+```
+
+Changelog
+---------
+Tue Oct 26, 2017
+- Make the CPU request equal to the limit if the request is greater than the limit
+
+Tue Oct 10, 2017
+- Default imagePullPolicy changed from Always to IfNotPresent
diff --git a/roles/openshift_logging/defaults/main.yml b/roles/openshift_logging/defaults/main.yml
index 0f1f659c6..626732d16 100644
--- a/roles/openshift_logging/defaults/main.yml
+++ b/roles/openshift_logging/defaults/main.yml
@@ -18,20 +18,24 @@ openshift_logging_curator_run_minute: 0
openshift_logging_curator_run_timezone: UTC
openshift_logging_curator_script_log_level: INFO
openshift_logging_curator_log_level: ERROR
-openshift_logging_curator_cpu_limit: 100m
-openshift_logging_curator_memory_limit: null
+openshift_logging_curator_cpu_limit: null
+openshift_logging_curator_memory_limit: 256Mi
+openshift_logging_curator_cpu_request: 100m
openshift_logging_curator_nodeselector: {}
-openshift_logging_curator_ops_cpu_limit: 100m
-openshift_logging_curator_ops_memory_limit: null
+openshift_logging_curator_ops_cpu_limit: null
+openshift_logging_curator_ops_memory_limit: 256Mi
+openshift_logging_curator_ops_cpu_request: 100m
openshift_logging_curator_ops_nodeselector: {}
openshift_logging_kibana_hostname: "{{ 'kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
openshift_logging_kibana_cpu_limit: null
openshift_logging_kibana_memory_limit: 736Mi
+openshift_logging_kibana_cpu_request: 100m
openshift_logging_kibana_proxy_debug: false
openshift_logging_kibana_proxy_cpu_limit: null
openshift_logging_kibana_proxy_memory_limit: 256Mi
+openshift_logging_kibana_proxy_cpu_request: 100m
openshift_logging_kibana_replica_count: 1
openshift_logging_kibana_edge_term_policy: Redirect
@@ -53,9 +57,11 @@ openshift_logging_kibana_ca: ""
openshift_logging_kibana_ops_hostname: "{{ 'kibana-ops.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
openshift_logging_kibana_ops_cpu_limit: null
openshift_logging_kibana_ops_memory_limit: 736Mi
+openshift_logging_kibana_ops_cpu_request: 100m
openshift_logging_kibana_ops_proxy_debug: false
openshift_logging_kibana_ops_proxy_cpu_limit: null
openshift_logging_kibana_ops_proxy_memory_limit: 256Mi
+openshift_logging_kibana_ops_proxy_cpu_request: 100m
openshift_logging_kibana_ops_replica_count: 1
#The absolute path on the control node to the cert file to use
@@ -71,13 +77,14 @@ openshift_logging_kibana_ops_key: ""
openshift_logging_kibana_ops_ca: ""
openshift_logging_fluentd_nodeselector: {'logging-infra-fluentd': 'true'}
-openshift_logging_fluentd_cpu_limit: 100m
+openshift_logging_fluentd_cpu_limit: null
openshift_logging_fluentd_memory_limit: 512Mi
+openshift_logging_fluentd_cpu_request: 100m
openshift_logging_fluentd_journal_source: ""
openshift_logging_fluentd_journal_read_from_head: ""
openshift_logging_fluentd_hosts: ['--all']
-openshift_logging_fluentd_buffer_queue_limit: 1024
-openshift_logging_fluentd_buffer_size_limit: 1m
+openshift_logging_fluentd_buffer_queue_limit: 32
+openshift_logging_fluentd_buffer_size_limit: 8m
openshift_logging_es_host: logging-es
openshift_logging_es_port: 9200
@@ -85,7 +92,8 @@ openshift_logging_es_ca: /etc/fluent/keys/ca
openshift_logging_es_client_cert: /etc/fluent/keys/cert
openshift_logging_es_client_key: /etc/fluent/keys/key
openshift_logging_es_cluster_size: 1
-openshift_logging_es_cpu_limit: 1000m
+openshift_logging_es_cpu_limit: null
+openshift_logging_es_cpu_request: "1"
# the logging appenders for the root loggers to write ES logs. Valid values: 'file', 'console'
openshift_logging_es_log_appenders: ['file']
openshift_logging_es_memory_limit: "8Gi"
@@ -94,12 +102,10 @@ openshift_logging_es_pvc_dynamic: "{{ openshift_logging_elasticsearch_pvc_dynami
openshift_logging_es_pvc_size: "{{ openshift_logging_elasticsearch_pvc_size | default('') }}"
openshift_logging_es_pvc_prefix: "{{ openshift_logging_elasticsearch_pvc_prefix | default('logging-es') }}"
openshift_logging_es_recover_after_time: 5m
-openshift_logging_es_storage_group: "{{ openshift_logging_elasticsearch_storage_group | default('65534') }}"
+openshift_logging_es_storage_group: "65534"
openshift_logging_es_nodeselector: {}
# openshift_logging_es_config is a hash to be merged into the defaults for the elasticsearch.yaml
openshift_logging_es_config: {}
-openshift_logging_es_number_of_shards: 1
-openshift_logging_es_number_of_replicas: 0
# for exposing es to external (outside of the cluster) clients
openshift_logging_es_allow_external: False
@@ -126,14 +132,15 @@ openshift_logging_es_ops_ca: /etc/fluent/keys/ca
openshift_logging_es_ops_client_cert: /etc/fluent/keys/cert
openshift_logging_es_ops_client_key: /etc/fluent/keys/key
openshift_logging_es_ops_cluster_size: "{{ openshift_logging_elasticsearch_ops_cluster_size | default(1) }}"
-openshift_logging_es_ops_cpu_limit: 1000m
-openshift_logging_es_ops_memory_limit: "8Gi"
+openshift_logging_es_ops_cpu_limit: null
+openshift_logging_es_ops_memory_limit: 8Gi
+openshift_logging_es_ops_cpu_request: "1"
openshift_logging_es_ops_pv_selector: "{{ openshift_loggingops_storage_labels | default('') }}"
openshift_logging_es_ops_pvc_dynamic: "{{ openshift_logging_elasticsearch_ops_pvc_dynamic | default(False) }}"
openshift_logging_es_ops_pvc_size: "{{ openshift_logging_elasticsearch_ops_pvc_size | default('') }}"
openshift_logging_es_ops_pvc_prefix: "{{ openshift_logging_elasticsearch_ops_pvc_prefix | default('logging-es-ops') }}"
openshift_logging_es_ops_recover_after_time: 5m
-openshift_logging_es_ops_storage_group: "{{ openshift_logging_elasticsearch_storage_group | default('65534') }}"
+openshift_logging_es_ops_storage_group: "65534"
openshift_logging_es_ops_nodeselector: {}
# for exposing es-ops to external (outside of the cluster) clients
@@ -160,8 +167,9 @@ openshift_logging_mux_allow_external: False
openshift_logging_use_mux: "{{ openshift_logging_mux_allow_external | default(False) }}"
openshift_logging_mux_hostname: "{{ 'mux.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true)) }}"
openshift_logging_mux_port: 24284
-openshift_logging_mux_cpu_limit: 500m
-openshift_logging_mux_memory_limit: 1Gi
+openshift_logging_mux_cpu_limit: null
+openshift_logging_mux_memory_limit: 512Mi
+openshift_logging_mux_cpu_request: 100m
# the namespace to use for undefined projects should come first, followed by any
# additional namespaces to create by default - users will typically not need to set this
openshift_logging_mux_default_namespaces: ["mux-undefined"]
diff --git a/roles/openshift_logging/filter_plugins/openshift_logging.py b/roles/openshift_logging/filter_plugins/openshift_logging.py
index eac086e81..e1a5ea726 100644
--- a/roles/openshift_logging/filter_plugins/openshift_logging.py
+++ b/roles/openshift_logging/filter_plugins/openshift_logging.py
@@ -3,6 +3,7 @@
'''
import random
+import re
def es_storage(os_logging_facts, dc_name, pvc_claim, root='elasticsearch'):
@@ -17,6 +18,47 @@ def es_storage(os_logging_facts, dc_name, pvc_claim, root='elasticsearch'):
return dict(kind='emptydir')
+def min_cpu(left, right):
+    '''Return the minimum cpu value of the two values given'''
+    message = "Unable to evaluate whether the {} cpu value '{}' is specified correctly. Expected a whole number, a decimal, or an integer followed by 'm'"
+    pattern = re.compile(r"^(\d*\.?\d*)([Mm])?$")
+    millis_per_core = 1000
+    if not right:
+        return left
+    m_left = pattern.match(left)
+    if not m_left:
+        raise RuntimeError(message.format("left", left))
+    m_right = pattern.match(right)
+    if not m_right:
+        raise RuntimeError(message.format("right", right))
+    left_value = float(m_left.group(1))
+    right_value = float(m_right.group(1))
+    if m_left.group(2) not in ["M", "m"]:
+        left_value = left_value * millis_per_core
+    if m_right.group(2) not in ["M", "m"]:
+        right_value = right_value * millis_per_core
+    response = left
+    if left_value != min(left_value, right_value):
+        response = right
+    return response
+
+
+def walk(source, path, default, delimiter='.'):
+    '''Walk the source hash given the path and return the value or default if not found'''
+    if not isinstance(source, dict):
+        raise RuntimeError('The source is not a walkable dict: {} path: {}'.format(source, path))
+    keys = path.split(delimiter)
+    max_depth = len(keys)
+    cur_depth = 0
+    while cur_depth < max_depth:
+        if keys[cur_depth] in source:
+            source = source[keys[cur_depth]]
+            cur_depth = cur_depth + 1
+        else:
+            return default
+    return source
+
+
def random_word(source_alpha, length):
''' Returns a random word given the source of characters to pick from and resulting length '''
return ''.join(random.choice(source_alpha) for i in range(length))
@@ -45,6 +87,21 @@ def map_from_pairs(source, delim="="):
return dict(item.split(delim) for item in source.split(","))
+def serviceaccount_name(qualified_sa):
+    ''' Returns the simple name from a fully qualified name '''
+    return qualified_sa.split(":")[-1]
+
+
+def serviceaccount_namespace(qualified_sa, default=None):
+    ''' Returns the namespace from a fully qualified name '''
+    seg = qualified_sa.split(":")
+    if len(seg) > 1:
+        return seg[-2]
+    if default:
+        return default
+    return seg[-1]
+
+
# pylint: disable=too-few-public-methods
class FilterModule(object):
''' OpenShift Logging Filters '''
@@ -56,5 +113,9 @@ class FilterModule(object):
            'random_word': random_word,
            'entry_from_named_pair': entry_from_named_pair,
            'map_from_pairs': map_from_pairs,
-            'es_storage': es_storage
+            'min_cpu': min_cpu,
+            'es_storage': es_storage,
+            'serviceaccount_name': serviceaccount_name,
+            'serviceaccount_namespace': serviceaccount_namespace,
+            'walk': walk
        }
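
For reference, a minimal sketch of how the new filters might be exercised from a task (the values are illustrative, not role defaults):

```
# Illustrative use of the min_cpu, walk, and serviceaccount_* filters
- set_fact:
    # min_cpu returns the smaller of two Kubernetes CPU quantities -> "500m"
    capped_request: "{{ '500m' | min_cpu('1') }}"
    # walk returns the value at the delimited path, or the default if missing -> 3
    shards: "{{ {'index': {'number_of_shards': 3}} | walk('index.number_of_shards', 1) }}"
    # serviceaccount_name/_namespace split a fully qualified SA -> "prometheus" / "prometheus"
    sa_name: "{{ 'system:serviceaccount:prometheus:prometheus' | serviceaccount_name }}"
    sa_ns: "{{ 'system:serviceaccount:prometheus:prometheus' | serviceaccount_namespace }}"
```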
diff --git a/roles/openshift_logging/filter_plugins/test b/roles/openshift_logging/filter_plugins/test
new file mode 100644
index 000000000..bac25c012
--- /dev/null
+++ b/roles/openshift_logging/filter_plugins/test
@@ -0,0 +1,49 @@
+import unittest
+from openshift_logging import walk
+from openshift_logging import min_cpu
+
+
+class TestFilterMethods(unittest.TestCase):
+
+    def test_min_cpu_for_none(self):
+        source = "1000M"
+        self.assertEquals(min_cpu(source, None), "1000M")
+
+    def test_min_cpu_for_millis(self):
+        source = "1"
+        self.assertEquals(min_cpu(source, "0.1"), "0.1")
+
+    def test_min_cpu_for_whole(self):
+        source = "120M"
+        self.assertEquals(min_cpu(source, "2"), "120M")
+
+    def test_walk_find_key(self):
+        source = {'foo': {'bar.xyz': 'myvalue'}}
+        self.assertEquals(walk(source, 'foo#bar.xyz', 123, delimiter='#'), 'myvalue')
+
+    def test_walk_return_default(self):
+        source = {'foo': {'bar.xyz': 'myvalue'}}
+        self.assertEquals(walk(source, 'foo#bar.abc', 123, delimiter='#'), 123)
+
+    def test_walk_limit_max_depth(self):
+        source = {'foo': {'bar.xyz': 'myvalue'}}
+        self.assertEquals(walk(source, 'foo#bar.abc#dontfindme', 123, delimiter='#'), 123)
+
+    def test_complex_hash(self):
+        source = {
+            'elasticsearch': {
+                'configmaps': {
+                    'logging-elasticsearch': {
+                        'elasticsearch.yml': "a string value"
+                    }
+                }
+            }
+        }
+        self.assertEquals(walk(source, 'elasticsearch#configmaps#logging-elasticsearch#elasticsearch.yml', 123, delimiter='#'), "a string value")
+
+
+if __name__ == '__main__':
+    unittest.main()
diff --git a/roles/openshift_logging/library/openshift_logging_facts.py b/roles/openshift_logging/library/openshift_logging_facts.py
index 35accfb78..98d0d1c4f 100644
--- a/roles/openshift_logging/library/openshift_logging_facts.py
+++ b/roles/openshift_logging/library/openshift_logging_facts.py
@@ -171,22 +171,25 @@ class OpenshiftLoggingFacts(OCBaseCommand):
if comp is not None:
spec = dc_item["spec"]["template"]["spec"]
facts = dict(
+ name=name,
selector=dc_item["spec"]["selector"],
replicas=dc_item["spec"]["replicas"],
serviceAccount=spec["serviceAccount"],
containers=dict(),
volumes=dict()
)
+ if "nodeSelector" in spec:
+ facts["nodeSelector"] = spec["nodeSelector"]
+ if "supplementalGroups" in spec["securityContext"]:
+ facts["storageGroups"] = spec["securityContext"]["supplementalGroups"]
+ facts["spec"] = spec
if "volumes" in spec:
for vol in spec["volumes"]:
clone = copy.deepcopy(vol)
clone.pop("name", None)
facts["volumes"][vol["name"]] = clone
for container in spec["containers"]:
- facts["containers"][container["name"]] = dict(
- image=container["image"],
- resources=container["resources"],
- )
+ facts["containers"][container["name"]] = container
self.add_facts_for(comp, "deploymentconfigs", name, facts)
def facts_for_services(self, namespace):
@@ -204,7 +207,7 @@ class OpenshiftLoggingFacts(OCBaseCommand):
def facts_for_configmaps(self, namespace):
''' Gathers facts for configmaps in logging namespace '''
self.default_keys_for("configmaps")
- a_list = self.oc_command("get", "configmaps", namespace=namespace, add_options=["-l", LOGGING_SELECTOR])
+ a_list = self.oc_command("get", "configmaps", namespace=namespace)
if len(a_list["items"]) == 0:
return
for item in a_list["items"]:
diff --git a/roles/openshift_logging/tasks/delete_logging.yaml b/roles/openshift_logging/tasks/delete_logging.yaml
index 3040d15ca..ffed956a4 100644
--- a/roles/openshift_logging/tasks/delete_logging.yaml
+++ b/roles/openshift_logging/tasks/delete_logging.yaml
@@ -92,6 +92,7 @@
with_items:
- rolebinding-reader
- daemonset-admin
+ - prometheus-metrics-viewer
# delete our configmaps
- name: delete configmaps
diff --git a/roles/openshift_logging/tasks/generate_certs.yaml b/roles/openshift_logging/tasks/generate_certs.yaml
index 9c8f0986a..f526fd734 100644
--- a/roles/openshift_logging/tasks/generate_certs.yaml
+++ b/roles/openshift_logging/tasks/generate_certs.yaml
@@ -139,10 +139,10 @@
# TODO: make idempotent
- name: Generate proxy session
- set_fact: session_secret={{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(200)}}
+ set_fact: session_secret={{ 200 | oo_random_word}}
check_mode: no
# TODO: make idempotent
- name: Generate oauth client secret
- set_fact: oauth_secret={{'abcdefghijklmnopqrstuvwxyzABCDEFGHIJKLMNOPQRSTUVWXYZ0123456789'|random_word(64)}}
+ set_fact: oauth_secret={{ 64 | oo_random_word}}
check_mode: no
diff --git a/roles/openshift_logging/tasks/install_logging.yaml b/roles/openshift_logging/tasks/install_logging.yaml
index 2695ef030..b98e281a3 100644
--- a/roles/openshift_logging/tasks/install_logging.yaml
+++ b/roles/openshift_logging/tasks/install_logging.yaml
@@ -36,6 +36,14 @@
- openshift_logging_label_key != ""
- openshift_logging_label_value is defined
+- name: Annotate Logging Project to allow overcommit
+ oc_edit:
+ kind: ns
+ name: "{{ openshift_logging_namespace }}"
+ separator: '#'
+ content:
+ metadata#annotations#quota.openshift.io/cluster-resource-override-enabled: "false"
+
- name: Create logging cert directory
file:
path: "{{ openshift.common.config_base }}/logging"
@@ -69,19 +77,23 @@
vars:
generated_certs_dir: "{{openshift.common.config_base}}/logging"
openshift_logging_elasticsearch_namespace: "{{ openshift_logging_namespace }}"
- openshift_logging_elasticsearch_deployment_name: "{{ item.0 }}"
- openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix ~ '-' ~ item.2 if item.1 is none else item.1 }}"
+ openshift_logging_elasticsearch_deployment_name: "{{ outer_item.0.name }}"
+ openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix ~ '-' ~ outer_item.2 if outer_item.1 is none else outer_item.1 }}"
openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}"
openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
- openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}"
- openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
+ openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_nodeselector if outer_item.0.nodeSelector | default(None) is none else outer_item.0.nodeSelector }}"
+ openshift_logging_elasticsearch_storage_group: "{{ [openshift_logging_es_storage_group] if outer_item.0.storageGroups | default([]) | length == 0 else outer_item.0.storageGroups }}"
+ _es_containers: "{{ outer_item.0.containers}}"
+ _es_configmap: "{{ openshift_logging_facts | walk('elasticsearch#configmaps#logging-elasticsearch#elasticsearch.yml', '{}', delimiter='#') | from_yaml }}"
with_together:
- - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs }}"
+ - "{{ openshift_logging_facts.elasticsearch.deploymentconfigs.values() }}"
- "{{ openshift_logging_facts.elasticsearch.pvcs }}"
- "{{ es_indices }}"
+ loop_control:
+ loop_var: outer_item
when:
- openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count > 0
@@ -91,15 +103,15 @@
vars:
generated_certs_dir: "{{openshift.common.config_base}}/logging"
openshift_logging_elasticsearch_namespace: "{{ openshift_logging_namespace }}"
- openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix }}-{{ item | int + openshift_logging_facts.elasticsearch.deploymentconfigs | count - 1 }}"
+ openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_pvc_prefix }}-{{ outer_item | int + openshift_logging_facts.elasticsearch.deploymentconfigs | count - 1 }}"
openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_cluster_size | int }}"
openshift_logging_elasticsearch_storage_type: "{{ elasticsearch_storage_type }}"
- openshift_logging_elasticsearch_pvc_size: "{{ openshift_logging_es_pvc_size }}"
- openshift_logging_elasticsearch_pvc_dynamic: "{{ openshift_logging_es_pvc_dynamic }}"
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_pv_selector }}"
with_sequence: count={{ openshift_logging_es_cluster_size | int - openshift_logging_facts.elasticsearch.deploymentconfigs.keys() | count }}
+ loop_control:
+ loop_var: outer_item
- set_fact: es_ops_indices={{ es_ops_indices | default([]) + [item | int - 1] }}
with_sequence: count={{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count }}
@@ -123,8 +135,8 @@
vars:
generated_certs_dir: "{{openshift.common.config_base}}/logging"
openshift_logging_elasticsearch_namespace: "{{ openshift_logging_namespace }}"
- openshift_logging_elasticsearch_deployment_name: "{{ item.0 }}"
- openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_ops_pvc_prefix ~ '-' ~ item.2 if item.1 is none else item.1 }}"
+ openshift_logging_elasticsearch_deployment_name: "{{ outer_item.0.name }}"
+ openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_ops_pvc_prefix ~ '-' ~ outer_item.2 if outer_item.1 is none else outer_item.1 }}"
openshift_logging_elasticsearch_ops_deployment: true
openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}"
@@ -134,18 +146,27 @@
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
- openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_ops_nodeselector }}"
+ openshift_logging_elasticsearch_cpu_request: "{{ openshift_logging_es_ops_cpu_request }}"
+ openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_ops_nodeselector if outer_item.0.nodeSelector | default(None) is none else outer_item.0.nodeSelector }}"
+ openshift_logging_elasticsearch_storage_group: "{{ [openshift_logging_es_ops_storage_group] if outer_item.0.storageGroups | default([]) | length == 0 else outer_item.0.storageGroups }}"
openshift_logging_es_key: "{{ openshift_logging_es_ops_key }}"
openshift_logging_es_cert: "{{ openshift_logging_es_ops_cert }}"
openshift_logging_es_ca_ext: "{{ openshift_logging_es_ops_ca_ext }}"
openshift_logging_es_hostname: "{{ openshift_logging_es_ops_hostname }}"
openshift_logging_es_edge_term_policy: "{{ openshift_logging_es_ops_edge_term_policy | default('') }}"
openshift_logging_es_allow_external: "{{ openshift_logging_es_ops_allow_external }}"
+ openshift_logging_es_number_of_shards: "{{ openshift_logging_es_ops_number_of_shards | default(None) }}"
+ openshift_logging_es_number_of_replicas: "{{ openshift_logging_es_ops_number_of_replicas | default(None) }}"
+ _es_containers: "{{ outer_item.0.containers}}"
+ _es_configmap: "{{ openshift_logging_facts | walk('elasticsearch_ops#configmaps#logging-elasticsearch-ops#elasticsearch.yml', '{}', delimiter='#') | from_yaml }}"
with_together:
- - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs }}"
+ - "{{ openshift_logging_facts.elasticsearch_ops.deploymentconfigs.values() }}"
- "{{ openshift_logging_facts.elasticsearch_ops.pvcs }}"
- "{{ es_ops_indices }}"
+ loop_control:
+ loop_var: outer_item
+
when:
- openshift_logging_use_ops | bool
- openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count > 0
@@ -156,7 +177,7 @@
vars:
generated_certs_dir: "{{openshift.common.config_base}}/logging"
openshift_logging_elasticsearch_namespace: "{{ openshift_logging_namespace }}"
- openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_ops_pvc_prefix }}-{{ item | int + openshift_logging_facts.elasticsearch_ops.deploymentconfigs | count - 1 }}"
+ openshift_logging_elasticsearch_pvc_name: "{{ openshift_logging_es_ops_pvc_prefix }}-{{ outer_item | int + openshift_logging_facts.elasticsearch_ops.deploymentconfigs | count - 1 }}"
openshift_logging_elasticsearch_ops_deployment: true
openshift_logging_elasticsearch_replica_count: "{{ openshift_logging_es_ops_cluster_size | int }}"
@@ -166,6 +187,7 @@
openshift_logging_elasticsearch_pvc_pv_selector: "{{ openshift_logging_es_ops_pv_selector }}"
openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_ops_memory_limit }}"
openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_ops_cpu_limit }}"
+ openshift_logging_elasticsearch_cpu_request: "{{ openshift_logging_es_ops_cpu_request }}"
openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_ops_nodeselector }}"
openshift_logging_es_key: "{{ openshift_logging_es_ops_key }}"
openshift_logging_es_cert: "{{ openshift_logging_es_ops_cert }}"
@@ -175,6 +197,8 @@
openshift_logging_es_allow_external: "{{ openshift_logging_es_ops_allow_external }}"
with_sequence: count={{ openshift_logging_es_ops_cluster_size | int - openshift_logging_facts.elasticsearch_ops.deploymentconfigs.keys() | count }}
+ loop_control:
+ loop_var: outer_item
when:
- openshift_logging_use_ops | bool
@@ -206,11 +230,13 @@
openshift_logging_kibana_es_port: "{{ openshift_logging_es_ops_port }}"
openshift_logging_kibana_nodeselector: "{{ openshift_logging_kibana_ops_nodeselector }}"
openshift_logging_kibana_cpu_limit: "{{ openshift_logging_kibana_ops_cpu_limit }}"
+ openshift_logging_kibana_cpu_request: "{{ openshift_logging_kibana_ops_cpu_request }}"
openshift_logging_kibana_memory_limit: "{{ openshift_logging_kibana_ops_memory_limit }}"
openshift_logging_kibana_hostname: "{{ openshift_logging_kibana_ops_hostname }}"
openshift_logging_kibana_replicas: "{{ openshift_logging_kibana_ops_replica_count }}"
openshift_logging_kibana_proxy_debug: "{{ openshift_logging_kibana_ops_proxy_debug }}"
openshift_logging_kibana_proxy_cpu_limit: "{{ openshift_logging_kibana_ops_proxy_cpu_limit }}"
+ openshift_logging_kibana_proxy_cpu_request: "{{ openshift_logging_kibana_ops_proxy_cpu_request }}"
openshift_logging_kibana_proxy_memory_limit: "{{ openshift_logging_kibana_ops_proxy_memory_limit }}"
openshift_logging_kibana_cert: "{{ openshift_logging_kibana_ops_cert }}"
openshift_logging_kibana_key: "{{ openshift_logging_kibana_ops_key }}"
@@ -242,6 +268,7 @@
openshift_logging_curator_master_url: "{{ openshift_logging_master_url }}"
openshift_logging_curator_image_pull_secret: "{{ openshift_logging_image_pull_secret }}"
openshift_logging_curator_cpu_limit: "{{ openshift_logging_curator_ops_cpu_limit }}"
+ openshift_logging_curator_cpu_request: "{{ openshift_logging_curator_ops_cpu_request }}"
openshift_logging_curator_memory_limit: "{{ openshift_logging_curator_ops_memory_limit }}"
openshift_logging_curator_nodeselector: "{{ openshift_logging_curator_ops_nodeselector }}"
when:
diff --git a/roles/openshift_logging/templates/jks_pod.j2 b/roles/openshift_logging/templates/jks_pod.j2
index 8b1c74211..e4110b7b3 100644
--- a/roles/openshift_logging/templates/jks_pod.j2
+++ b/roles/openshift_logging/templates/jks_pod.j2
@@ -8,7 +8,7 @@ spec:
containers:
- name: jks-cert-gen
image: {{openshift_logging_image_prefix}}logging-deployer:{{openshift_logging_image_version}}
- imagePullPolicy: Always
+ imagePullPolicy: IfNotPresent
command: ["sh", "{{generated_certs_dir}}/generate-jks.sh"]
securityContext:
privileged: true
diff --git a/roles/openshift_logging_curator/defaults/main.yml b/roles/openshift_logging_curator/defaults/main.yml
index 17807b644..9cae9f936 100644
--- a/roles/openshift_logging_curator/defaults/main.yml
+++ b/roles/openshift_logging_curator/defaults/main.yml
@@ -9,8 +9,9 @@ openshift_logging_curator_namespace: logging
### Common settings
openshift_logging_curator_nodeselector: ""
-openshift_logging_curator_cpu_limit: 100m
-openshift_logging_curator_memory_limit: null
+openshift_logging_curator_cpu_limit: null
+openshift_logging_curator_cpu_request: 100m
+openshift_logging_curator_memory_limit: 256Mi
openshift_logging_curator_es_host: "logging-es"
openshift_logging_curator_es_port: 9200
diff --git a/roles/openshift_logging_curator/tasks/main.yaml b/roles/openshift_logging_curator/tasks/main.yaml
index 6e8fab2b5..fcaf18ed4 100644
--- a/roles/openshift_logging_curator/tasks/main.yaml
+++ b/roles/openshift_logging_curator/tasks/main.yaml
@@ -90,6 +90,7 @@
es_host: "{{ openshift_logging_curator_es_host }}"
es_port: "{{ openshift_logging_curator_es_port }}"
curator_cpu_limit: "{{ openshift_logging_curator_cpu_limit }}"
+ curator_cpu_request: "{{ openshift_logging_curator_cpu_request | min_cpu(openshift_logging_curator_cpu_limit | default(none)) }}"
curator_memory_limit: "{{ openshift_logging_curator_memory_limit }}"
curator_replicas: "{{ openshift_logging_curator_replicas | default (1) }}"
curator_node_selector: "{{openshift_logging_curator_nodeselector | default({})}}"
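
A quick sketch of what the min_cpu filter above evaluates to (illustrative values):

```
# the request is capped at the limit when the limit is smaller
curator_cpu_request: "{{ '500m' | min_cpu('200m') }}"  # -> "200m"
# with no limit set (null/none), the request passes through unchanged
curator_cpu_request: "{{ '100m' | min_cpu(none) }}"    # -> "100m"
```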
diff --git a/roles/openshift_logging_curator/templates/curator.j2 b/roles/openshift_logging_curator/templates/curator.j2
index e74918a40..462128366 100644
--- a/roles/openshift_logging_curator/templates/curator.j2
+++ b/roles/openshift_logging_curator/templates/curator.j2
@@ -38,14 +38,27 @@ spec:
-
name: "curator"
image: {{image}}
- imagePullPolicy: Always
+ imagePullPolicy: IfNotPresent
+{% if (curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "") or (curator_cpu_limit is defined and curator_cpu_limit is not none and curator_cpu_limit != "") or (curator_cpu_request is defined and curator_cpu_request is not none and curator_cpu_request != "") %}
resources:
+{% if (curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "") or (curator_cpu_limit is defined and curator_cpu_limit is not none and curator_cpu_limit != "") %}
limits:
+{% if curator_cpu_limit is defined and curator_cpu_limit is not none and curator_cpu_limit != "" %}
cpu: "{{curator_cpu_limit}}"
-{% if curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "" %}
+{% endif %}
+{% if curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "" %}
memory: "{{curator_memory_limit}}"
+{% endif %}
+{% endif %}
+{% if (curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "") or (curator_cpu_request is defined and curator_cpu_request is not none and curator_cpu_request != "") %}
requests:
+{% if curator_cpu_request is defined and curator_cpu_request is not none and curator_cpu_request != "" %}
+ cpu: "{{curator_cpu_request}}"
+{% endif %}
+{% if curator_memory_limit is defined and curator_memory_limit is not none and curator_memory_limit != "" %}
memory: "{{curator_memory_limit}}"
+{% endif %}
+{% endif %}
{% endif %}
env:
-
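
With the defaults introduced in this commit (no CPU limit, a 100m CPU request, and a 256Mi memory limit), the conditional block above would render roughly as:

```
resources:
  limits:
    memory: "256Mi"
  requests:
    cpu: "100m"
    memory: "256Mi"
```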
diff --git a/roles/openshift_logging_elasticsearch/defaults/main.yml b/roles/openshift_logging_elasticsearch/defaults/main.yml
index 75bd479be..9fc6fd1d8 100644
--- a/roles/openshift_logging_elasticsearch/defaults/main.yml
+++ b/roles/openshift_logging_elasticsearch/defaults/main.yml
@@ -6,7 +6,8 @@ openshift_logging_elasticsearch_image_pull_secret: "{{ openshift_hosted_logging_
openshift_logging_elasticsearch_namespace: logging
openshift_logging_elasticsearch_nodeselector: "{{ openshift_logging_es_nodeselector | default('') }}"
-openshift_logging_elasticsearch_cpu_limit: 1000m
+openshift_logging_elasticsearch_cpu_limit: "{{ openshift_logging_es_cpu_limit | default('') }}"
+openshift_logging_elasticsearch_cpu_request: "{{ openshift_logging_es_cpu_request | default('1000m') }}"
openshift_logging_elasticsearch_memory_limit: "{{ openshift_logging_es_memory_limit | default('1Gi') }}"
openshift_logging_elasticsearch_recover_after_time: "{{ openshift_logging_es_recover_after_time | default('5m') }}"
@@ -33,13 +34,17 @@ openshift_logging_elasticsearch_pvc_size: ""
openshift_logging_elasticsearch_pvc_dynamic: false
openshift_logging_elasticsearch_pvc_pv_selector: {}
openshift_logging_elasticsearch_pvc_access_modes: ['ReadWriteOnce']
-openshift_logging_elasticsearch_storage_group: '65534'
+openshift_logging_elasticsearch_storage_group: ['65534']
openshift_logging_es_pvc_prefix: "{{ openshift_hosted_logging_elasticsearch_pvc_prefix | default('logging-es') }}"
# config the es plugin to write kibana index based on the index mode
openshift_logging_elasticsearch_kibana_index_mode: 'unique'
+openshift_logging_elasticsearch_proxy_cpu_request: "100m"
+openshift_logging_elasticsearch_proxy_memory_limit: "64Mi"
+openshift_logging_elasticsearch_prometheus_sa: "system:serviceaccount:{{openshift_prometheus_namespace | default('prometheus')}}:prometheus"
+
# this is used to determine if this is an operations deployment or a non-ops deployment
# simply used for naming purposes
openshift_logging_elasticsearch_ops_deployment: false
diff --git a/roles/openshift_logging_elasticsearch/files/es_migration.sh b/roles/openshift_logging_elasticsearch/files/es_migration.sh
deleted file mode 100644
index 339b5a1b2..000000000
--- a/roles/openshift_logging_elasticsearch/files/es_migration.sh
+++ /dev/null
@@ -1,79 +0,0 @@
-CA=${1:-/etc/openshift/logging/ca.crt}
-KEY=${2:-/etc/openshift/logging/system.admin.key}
-CERT=${3:-/etc/openshift/logging/system.admin.crt}
-openshift_logging_es_host=${4:-logging-es}
-openshift_logging_es_port=${5:-9200}
-namespace=${6:-logging}
-
-# for each index in _cat/indices
-# skip indices that begin with . - .kibana, .operations, etc.
-# skip indices that contain a uuid
-# get a list of unique project
-# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices
-# we are interested in - the awk will strip that part off
-function get_list_of_indices() {
- curl -s --cacert $CA --key $KEY --cert $CERT https://$openshift_logging_es_host:$openshift_logging_es_port/_cat/indices | \
- awk -v daterx='[.]20[0-9]{2}[.][0-1]?[0-9][.][0-9]{1,2}$' \
- '$3 !~ "^[.]" && $3 !~ "^[^.]+[.][^.]+"daterx && $3 !~ "^project." && $3 ~ daterx {print gensub(daterx, "", "", $3)}' | \
- sort -u
-}
-
-# for each index in _cat/indices
-# skip indices that begin with . - .kibana, .operations, etc.
-# get a list of unique project.uuid
-# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices
-# we are interested in - the awk will strip that part off
-function get_list_of_proj_uuid_indices() {
- curl -s --cacert $CA --key $KEY --cert $CERT https://$openshift_logging_es_host:$openshift_logging_es_port/_cat/indices | \
- awk -v daterx='[.]20[0-9]{2}[.][0-1]?[0-9][.][0-9]{1,2}$' \
- '$3 !~ "^[.]" && $3 ~ "^[^.]+[.][^.]+"daterx && $3 !~ "^project." && $3 ~ daterx {print gensub(daterx, "", "", $3)}' | \
- sort -u
-}
-
-if [[ -z "$(oc get pods -l component=es -o jsonpath='{.items[?(@.status.phase == "Running")].metadata.name}')" ]]; then
- echo "No Elasticsearch pods found running. Cannot update common data model."
- exit 1
-fi
-
-count=$(get_list_of_indices | wc -l)
-if [ $count -eq 0 ]; then
- echo No matching indices found - skipping update_for_uuid
-else
- echo Creating aliases for $count index patterns . . .
- {
- echo '{"actions":['
- get_list_of_indices | \
- while IFS=. read proj ; do
- # e.g. make test.uuid.* an alias of test.* so we can search for
- # /test.uuid.*/_search and get both the test.uuid.* and
- # the test.* indices
- uid=$(oc get project "$proj" -o jsonpath='{.metadata.uid}' 2>/dev/null)
- [ -n "$uid" ] && echo "{\"add\":{\"index\":\"$proj.*\",\"alias\":\"$proj.$uuid.*\"}}"
- done
- echo ']}'
- } | curl -s --cacert $CA --key $KEY --cert $CERT -XPOST -d @- "https://$openshift_logging_es_host:$openshift_logging_es_port/_aliases"
-fi
-
-count=$(get_list_of_proj_uuid_indices | wc -l)
-if [ $count -eq 0 ] ; then
- echo No matching indexes found - skipping update_for_common_data_model
- exit 0
-fi
-
-echo Creating aliases for $count index patterns . . .
-# for each index in _cat/indices
-# skip indices that begin with . - .kibana, .operations, etc.
-# get a list of unique project.uuid
-# daterx - the date regex that matches the .%Y.%m.%d at the end of the indices
-# we are interested in - the awk will strip that part off
-{
- echo '{"actions":['
- get_list_of_proj_uuid_indices | \
- while IFS=. read proj uuid ; do
- # e.g. make project.test.uuid.* and alias of test.uuid.* so we can search for
- # /project.test.uuid.*/_search and get both the test.uuid.* and
- # the project.test.uuid.* indices
- echo "{\"add\":{\"index\":\"$proj.$uuid.*\",\"alias\":\"${PROJ_PREFIX}$proj.$uuid.*\"}}"
- done
- echo ']}'
-} | curl -s --cacert $CA --key $KEY --cert $CERT -XPOST -d @- "https://$openshift_logging_es_host:$openshift_logging_es_port/_aliases"
diff --git a/roles/openshift_logging_elasticsearch/tasks/main.yaml b/roles/openshift_logging_elasticsearch/tasks/main.yaml
index 1e800b1d6..e7ef443bd 100644
--- a/roles/openshift_logging_elasticsearch/tasks/main.yaml
+++ b/roles/openshift_logging_elasticsearch/tasks/main.yaml
@@ -17,6 +17,17 @@
- include: determine_version.yaml
+- name: Set default image variables based on deployment_type
+ include_vars: "{{ item }}"
+ with_first_found:
+ - "{{ openshift_deployment_type | default(deployment_type) }}.yml"
+ - "default_images.yml"
+
+- name: Set elasticsearch_prefix image facts
+ set_fact:
+ openshift_logging_elasticsearch_proxy_image_prefix: "{{ openshift_logging_elasticsearch_proxy_image_prefix | default(__openshift_logging_elasticsearch_proxy_image_prefix) }}"
+ openshift_logging_elasticsearch_proxy_image_version: "{{ openshift_logging_elasticsearch_proxy_image_version | default(__openshift_logging_elasticsearch_proxy_image_version) }}"
+
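
As a sketch, with the default_images.yml values added at the end of this commit, the proxy image reference later composed in this file would resolve as:

```
# {{ openshift_logging_elasticsearch_proxy_image_prefix }}oauth-proxy:{{ openshift_logging_elasticsearch_proxy_image_version }}
# -> docker.io/openshift/oauth-proxy:v1.0.0
```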
# allow passing in a tempdir
- name: Create temp directory for doing work in
command: mktemp -d /tmp/openshift-logging-ansible-XXXXXX
@@ -37,6 +48,7 @@
# we want to make sure we have all the necessary components here
# service account
+
- name: Create ES service account
oc_serviceaccount:
state: present
@@ -51,7 +63,7 @@
name: "aggregated-logging-elasticsearch"
namespace: "{{ openshift_logging_elasticsearch_namespace }}"
when:
- - openshift_logging_image_pull_secret == ''
+ - openshift_logging_image_pull_secret == ''
# rolebinding reader
- copy:
@@ -65,7 +77,7 @@
kind: clusterrole
namespace: "{{ openshift_logging_elasticsearch_namespace }}"
files:
- - "{{ tempdir }}/rolebinding-reader.yml"
+ - "{{ tempdir }}/rolebinding-reader.yml"
delete_after: true
# SA roles
@@ -77,6 +89,38 @@
resource_name: rolebinding-reader
user: "system:serviceaccount:{{ openshift_logging_elasticsearch_namespace }}:aggregated-logging-elasticsearch"
+- oc_adm_policy_user:
+ state: present
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ resource_kind: cluster-role
+ resource_name: system:auth-delegator
+ user: "system:serviceaccount:{{ openshift_logging_elasticsearch_namespace}}:aggregated-logging-elasticsearch"
+
+# logging-metrics-reader role
+- template:
+ src: logging-metrics-role.j2
+ dest: "{{mktemp.stdout}}/templates/logging-metrics-role.yml"
+ vars:
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ role_namespace: "{{ openshift_logging_elasticsearch_prometheus_sa | serviceaccount_namespace(openshift_logging_elasticsearch_namespace) }}"
+ role_user: "{{ openshift_logging_elasticsearch_prometheus_sa | serviceaccount_name }}"
+
+- name: Create logging-metrics-reader-role
+ command: >
+ {{ openshift.common.client_binary }}
+ --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+ -n "{{ openshift_logging_elasticsearch_namespace }}"
+ create -f "{{mktemp.stdout}}/templates/logging-metrics-role.yml"
+ register: prometheus_out
+ check_mode: no
+ ignore_errors: yes
+
+- fail:
+ msg: "There was an error creating the logging-metrics-role and binding: {{prometheus_out}}"
+ when:
+ - "prometheus_out.stderr | length > 0"
+ - "'already exists' not in prometheus_out.stderr"
+
# View role and binding
- name: Generate logging-elasticsearch-view-role
template:
@@ -87,8 +131,8 @@
roleRef:
name: view
subjects:
- - kind: ServiceAccount
- name: aggregated-logging-elasticsearch
+ - kind: ServiceAccount
+ name: aggregated-logging-elasticsearch
changed_when: no
- name: Set logging-elasticsearch-view-role role
@@ -98,18 +142,18 @@
kind: rolebinding
namespace: "{{ openshift_logging_elasticsearch_namespace }}"
files:
- - "{{ tempdir }}/logging-elasticsearch-view-role.yaml"
+ - "{{ tempdir }}/logging-elasticsearch-view-role.yaml"
delete_after: true
# configmap
- assert:
that:
- - openshift_logging_elasticsearch_kibana_index_mode in __kibana_index_modes
+ - openshift_logging_elasticsearch_kibana_index_mode in __kibana_index_modes
msg: "The openshift_logging_elasticsearch_kibana_index_mode '{{ openshift_logging_elasticsearch_kibana_index_mode }}' only supports one of: {{ __kibana_index_modes | join(', ') }}"
- assert:
that:
- - "{{ openshift_logging_es_log_appenders | length > 0 }}"
+ - "{{ openshift_logging_es_log_appenders | length > 0 }}"
msg: "The openshift_logging_es_log_appenders '{{ openshift_logging_es_log_appenders }}' has an unrecognized option and only supports the following as a list: {{ __es_log_appenders | join(', ') }}"
- template:
@@ -120,13 +164,17 @@
when: es_logging_contents is undefined
changed_when: no
+- set_fact:
+ __es_num_of_shards: "{{ _es_configmap | default({}) | walk('index.number_of_shards', '1') }}"
+ __es_num_of_replicas: "{{ _es_configmap | default({}) | walk('index.number_of_replicas', '0') }}"
+
- template:
src: elasticsearch.yml.j2
dest: "{{ tempdir }}/elasticsearch.yml"
vars:
allow_cluster_reader: "{{ openshift_logging_elasticsearch_ops_allow_cluster_reader | lower | default('false') }}"
- es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(1) }}"
- es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas | default(0) }}"
+ es_number_of_shards: "{{ openshift_logging_es_number_of_shards | default(None) or __es_num_of_shards }}"
+ es_number_of_replicas: "{{ openshift_logging_es_number_of_replicas | default(None) or __es_num_of_replicas }}"
es_kibana_index_mode: "{{ openshift_logging_elasticsearch_kibana_index_mode | default('unique') }}"
when: es_config_contents is undefined
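
As a sketch of the walk-based defaulting above: if an existing logging-elasticsearch configmap carried shard settings, they are recovered; otherwise the '1'/'0' defaults apply (values illustrative):

```
- set_fact:
    _es_configmap:
      index:
        number_of_shards: 3
        number_of_replicas: 1
- set_fact:
    __es_num_of_shards: "{{ _es_configmap | default({}) | walk('index.number_of_shards', '1') }}"      # -> 3
    __es_num_of_replicas: "{{ _es_configmap | default({}) | walk('index.number_of_replicas', '0') }}"  # -> 1
```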
@@ -161,22 +209,22 @@
name: "logging-elasticsearch"
namespace: "{{ openshift_logging_elasticsearch_namespace }}"
files:
- - name: key
- path: "{{ generated_certs_dir }}/logging-es.jks"
- - name: truststore
- path: "{{ generated_certs_dir }}/truststore.jks"
- - name: searchguard.key
- path: "{{ generated_certs_dir }}/elasticsearch.jks"
- - name: searchguard.truststore
- path: "{{ generated_certs_dir }}/truststore.jks"
- - name: admin-key
- path: "{{ generated_certs_dir }}/system.admin.key"
- - name: admin-cert
- path: "{{ generated_certs_dir }}/system.admin.crt"
- - name: admin-ca
- path: "{{ generated_certs_dir }}/ca.crt"
- - name: admin.jks
- path: "{{ generated_certs_dir }}/system.admin.jks"
+ - name: key
+ path: "{{ generated_certs_dir }}/logging-es.jks"
+ - name: truststore
+ path: "{{ generated_certs_dir }}/truststore.jks"
+ - name: searchguard.key
+ path: "{{ generated_certs_dir }}/elasticsearch.jks"
+ - name: searchguard.truststore
+ path: "{{ generated_certs_dir }}/truststore.jks"
+ - name: admin-key
+ path: "{{ generated_certs_dir }}/system.admin.key"
+ - name: admin-cert
+ path: "{{ generated_certs_dir }}/system.admin.crt"
+ - name: admin-ca
+ path: "{{ generated_certs_dir }}/ca.crt"
+ - name: admin.jks
+ path: "{{ generated_certs_dir }}/system.admin.jks"
# services
- name: Set logging-{{ es_component }}-cluster service
@@ -190,7 +238,7 @@
labels:
logging-infra: 'support'
ports:
- - port: 9300
+ - port: 9300
- name: Set logging-{{ es_component }} service
oc_service:
@@ -203,8 +251,34 @@
labels:
logging-infra: 'support'
ports:
- - port: 9200
- targetPort: "restapi"
+ - port: 9200
+ targetPort: "restapi"
+
+- name: Set logging-{{ es_component}}-prometheus service
+ oc_service:
+ state: present
+ name: "logging-{{es_component}}-prometheus"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ labels:
+ logging-infra: 'support'
+ ports:
+ - name: proxy
+ port: 443
+ targetPort: 4443
+ selector:
+ component: "{{ es_component }}-prometheus"
+ provider: openshift
+
+- oc_edit:
+ kind: service
+ name: "logging-{{es_component}}-prometheus"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ separator: '#'
+ content:
+ metadata#annotations#service.alpha.openshift.io/serving-cert-secret-name: "prometheus-tls"
+ metadata#annotations#prometheus.io/scrape: "true"
+ metadata#annotations#prometheus.io/scheme: "https"
+ metadata#annotations#prometheus.io/path: "_prometheus/metrics"
- name: Check to see if PVC already exists
oc_obj:
@@ -218,49 +292,49 @@
# so we check for the presence of 'stderr' to determine if the obj exists or not
# the RC for existing and not existing is both 0
- when:
- - logging_elasticsearch_pvc.results.stderr is defined
- - openshift_logging_elasticsearch_storage_type == "pvc"
+ - logging_elasticsearch_pvc.results.stderr is defined
+ - openshift_logging_elasticsearch_storage_type == "pvc"
block:
- # storageclasses are used by default but if static then disable
- # storageclasses with the storageClassName set to "" in pvc.j2
- - name: Creating ES storage template - static
- template:
- src: pvc.j2
- dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
- vars:
- obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
- size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}"
- access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
- pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
- storage_class_name: "{{ openshift_logging_elasticsearch_pvc_storage_class_name | default('', true) }}"
- when:
- - not openshift_logging_elasticsearch_pvc_dynamic | bool
-
- # Storageclasses are used by default if configured
- - name: Creating ES storage template - dynamic
- template:
- src: pvc.j2
- dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
- vars:
- obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
- size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}"
- access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
- pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
- when:
- - openshift_logging_elasticsearch_pvc_dynamic | bool
-
- - name: Set ES storage
- oc_obj:
- state: present
- kind: pvc
- name: "{{ openshift_logging_elasticsearch_pvc_name }}"
- namespace: "{{ openshift_logging_elasticsearch_namespace }}"
- files:
- - "{{ tempdir }}/templates/logging-es-pvc.yml"
- delete_after: true
+ # storageclasses are used by default but if static then disable
+ # storageclasses with the storageClassName set to "" in pvc.j2
+ - name: Creating ES storage template - static
+ template:
+ src: pvc.j2
+ dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
+ vars:
+ obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
+ size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}"
+ access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
+ pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
+ storage_class_name: "{{ openshift_logging_elasticsearch_pvc_storage_class_name | default('', true) }}"
+ when:
+ - not openshift_logging_elasticsearch_pvc_dynamic | bool
+
+ # Storageclasses are used by default if configured
+ - name: Creating ES storage template - dynamic
+ template:
+ src: pvc.j2
+ dest: "{{ tempdir }}/templates/logging-es-pvc.yml"
+ vars:
+ obj_name: "{{ openshift_logging_elasticsearch_pvc_name }}"
+ size: "{{ (openshift_logging_elasticsearch_pvc_size | trim | length == 0) | ternary('10Gi', openshift_logging_elasticsearch_pvc_size) }}"
+ access_modes: "{{ openshift_logging_elasticsearch_pvc_access_modes | list }}"
+ pv_selector: "{{ openshift_logging_elasticsearch_pvc_pv_selector }}"
+ when:
+ - openshift_logging_elasticsearch_pvc_dynamic | bool
+
+ - name: Set ES storage
+ oc_obj:
+ state: present
+ kind: pvc
+ name: "{{ openshift_logging_elasticsearch_pvc_name }}"
+ namespace: "{{ openshift_logging_elasticsearch_namespace }}"
+ files:
+ - "{{ tempdir }}/templates/logging-es-pvc.yml"
+ delete_after: true
- set_fact:
- es_deploy_name: "logging-{{ es_component }}-{{ openshift_logging_elasticsearch_deployment_type }}-{{ 'abcdefghijklmnopqrstuvwxyz0123456789' | random_word(8) }}"
+ es_deploy_name: "logging-{{ es_component }}-{{ openshift_logging_elasticsearch_deployment_type }}-{{ 8 | oo_random_word('abcdefghijklmnopqrstuvwxyz0123456789') }}"
when: openshift_logging_elasticsearch_deployment_name == ""
- set_fact:
@@ -278,9 +352,13 @@
logging_component: elasticsearch
deploy_name: "{{ es_deploy_name }}"
image: "{{ openshift_logging_elasticsearch_image_prefix }}logging-elasticsearch:{{ openshift_logging_elasticsearch_image_version }}"
- es_cpu_limit: "{{ openshift_logging_elasticsearch_cpu_limit }}"
+ proxy_image: "{{ openshift_logging_elasticsearch_proxy_image_prefix }}oauth-proxy:{{ openshift_logging_elasticsearch_proxy_image_version }}"
+ es_cpu_limit: "{{ openshift_logging_elasticsearch_cpu_limit | default('') }}"
+ es_cpu_request: "{{ openshift_logging_elasticsearch_cpu_request | min_cpu(openshift_logging_elasticsearch_cpu_limit | default(none)) }}"
es_memory_limit: "{{ openshift_logging_elasticsearch_memory_limit }}"
es_node_selector: "{{ openshift_logging_elasticsearch_nodeselector | default({}) }}"
+ es_storage_groups: "{{ openshift_logging_elasticsearch_storage_group | default([]) }}"
+ es_container_security_context: "{{ _es_containers.elasticsearch.securityContext if _es_containers is defined and 'elasticsearch' in _es_containers and 'securityContext' in _es_containers.elasticsearch else None }}"
deploy_type: "{{ openshift_logging_elasticsearch_deployment_type }}"
es_replicas: 1
@@ -291,7 +369,7 @@
namespace: "{{ openshift_logging_elasticsearch_namespace }}"
kind: dc
files:
- - "{{ tempdir }}/templates/logging-es-dc.yml"
+ - "{{ tempdir }}/templates/logging-es-dc.yml"
delete_after: true
- name: Retrieving the cert to use when generating secrets for the {{ es_component }} component
@@ -299,37 +377,37 @@
src: "{{ generated_certs_dir }}/{{ item.file }}"
register: key_pairs
with_items:
- - { name: "ca_file", file: "ca.crt" }
- - { name: "es_key", file: "system.logging.es.key" }
- - { name: "es_cert", file: "system.logging.es.crt" }
+ - { name: "ca_file", file: "ca.crt" }
+ - { name: "es_key", file: "system.logging.es.key" }
+ - { name: "es_cert", file: "system.logging.es.crt" }
when: openshift_logging_es_allow_external | bool
- set_fact:
es_key: "{{ lookup('file', openshift_logging_es_key) | b64encode }}"
when:
- - openshift_logging_es_key | trim | length > 0
- - openshift_logging_es_allow_external | bool
+ - openshift_logging_es_key | trim | length > 0
+ - openshift_logging_es_allow_external | bool
changed_when: false
- set_fact:
es_cert: "{{ lookup('file', openshift_logging_es_cert) | b64encode }}"
when:
- - openshift_logging_es_cert | trim | length > 0
- - openshift_logging_es_allow_external | bool
+ - openshift_logging_es_cert | trim | length > 0
+ - openshift_logging_es_allow_external | bool
changed_when: false
- set_fact:
es_ca: "{{ lookup('file', openshift_logging_es_ca_ext) | b64encode }}"
when:
- - openshift_logging_es_ca_ext | trim | length > 0
- - openshift_logging_es_allow_external | bool
+ - openshift_logging_es_ca_ext | trim | length > 0
+ - openshift_logging_es_allow_external | bool
changed_when: false
- set_fact:
es_ca: "{{ key_pairs | entry_from_named_pair('ca_file') }}"
when:
- - es_ca is not defined
- - openshift_logging_es_allow_external | bool
+ - es_ca is not defined
+ - openshift_logging_es_allow_external | bool
changed_when: false
- name: Generating Elasticsearch {{ es_component }} route template
@@ -360,7 +438,7 @@
namespace: "{{ openshift_logging_elasticsearch_namespace }}"
kind: route
files:
- - "{{ tempdir }}/templates/logging-{{ es_component }}-route.yaml"
+ - "{{ tempdir }}/templates/logging-{{ es_component }}-route.yaml"
when: openshift_logging_es_allow_external | bool
## Placeholder for migration when necessary ##
diff --git a/roles/openshift_logging_elasticsearch/templates/es.j2 b/roles/openshift_logging_elasticsearch/templates/es.j2
index 3c8f390c4..0c7d8b46e 100644
--- a/roles/openshift_logging_elasticsearch/templates/es.j2
+++ b/roles/openshift_logging_elasticsearch/templates/es.j2
@@ -29,7 +29,9 @@ spec:
serviceAccountName: aggregated-logging-elasticsearch
securityContext:
supplementalGroups:
- - {{openshift_logging_elasticsearch_storage_group}}
+{% for group in es_storage_groups %}
+ - {{group}}
+{% endfor %}
{% if es_node_selector is iterable and es_node_selector | length > 0 %}
nodeSelector:
{% for key, value in es_node_selector.iteritems() %}
@@ -37,18 +39,56 @@ spec:
{% endfor %}
{% endif %}
containers:
+ - name: proxy
+ image: {{ proxy_image }}
+ imagePullPolicy: IfNotPresent
+ args:
+ - --upstream-ca=/etc/elasticsearch/secret/admin-ca
+ - --https-address=:4443
+ - -provider=openshift
+ - -client-id={{openshift_logging_elasticsearch_prometheus_sa}}
+ - -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token
+ - -cookie-secret={{ 16 | oo_random_word | b64encode }}
+ - -upstream=https://localhost:9200
+ - '-openshift-sar={"namespace": "{{ openshift_logging_elasticsearch_namespace}}", "verb": "view", "resource": "prometheus", "group": "metrics.openshift.io"}'
+ - '-openshift-delegate-urls={"/": {"resource": "prometheus", "verb": "view", "group": "metrics.openshift.io", "namespace": "{{ openshift_logging_elasticsearch_namespace}}"}}'
+ - --tls-cert=/etc/tls/private/tls.crt
+ - --tls-key=/etc/tls/private/tls.key
+ - -pass-access-token
+ - -pass-user-headers
+ ports:
+ - containerPort: 4443
+ name: proxy
+ protocol: TCP
+ volumeMounts:
+ - mountPath: /etc/tls/private
+ name: proxy-tls
+ readOnly: true
+ - mountPath: /etc/elasticsearch/secret
+ name: elasticsearch
+ readOnly: true
+ resources:
+ limits:
+ memory: "{{openshift_logging_elasticsearch_proxy_memory_limit }}"
+ requests:
+ cpu: "{{openshift_logging_elasticsearch_proxy_cpu_request }}"
+ memory: "{{openshift_logging_elasticsearch_proxy_memory_limit }}"
-
name: "elasticsearch"
image: {{image}}
- imagePullPolicy: Always
+ imagePullPolicy: IfNotPresent
resources:
limits:
- memory: "{{es_memory_limit}}"
-{% if es_cpu_limit is defined and es_cpu_limit is not none %}
+{% if es_cpu_limit is defined and es_cpu_limit is not none and es_cpu_limit != '' %}
cpu: "{{es_cpu_limit}}"
{% endif %}
+ memory: "{{es_memory_limit}}"
requests:
+ cpu: "{{es_cpu_request}}"
memory: "{{es_memory_limit}}"
+{% if es_container_security_context %}
+ securityContext: {{ es_container_security_context | to_yaml }}
+{% endif %}
ports:
-
containerPort: 9200
@@ -94,7 +134,7 @@ spec:
value: "30"
-
name: "POD_LABEL"
- value: "component={{component}}"
+ value: "component={{component}}"
-
name: "IS_MASTER"
value: "{% if deploy_type in ['data-master', 'master'] %}true{% else %}false{% endif %}"
@@ -102,6 +142,9 @@ spec:
-
name: "HAS_DATA"
value: "{% if deploy_type in ['data-master', 'data-client'] %}true{% else %}false{% endif %}"
+ -
+ name: "PROMETHEUS_USER"
+ value: "{{openshift_logging_elasticsearch_prometheus_sa}}"
volumeMounts:
- name: elasticsearch
@@ -120,6 +163,9 @@ spec:
timeoutSeconds: 30
periodSeconds: 5
volumes:
+ - name: proxy-tls
+ secret:
+ secretName: prometheus-tls
- name: elasticsearch
secret:
secretName: logging-elasticsearch
diff --git a/roles/openshift_logging_elasticsearch/templates/logging-metrics-role.j2 b/roles/openshift_logging_elasticsearch/templates/logging-metrics-role.j2
new file mode 100644
index 000000000..d9800e5a5
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/templates/logging-metrics-role.j2
@@ -0,0 +1,31 @@
+---
+apiVersion: v1
+kind: List
+items:
+- apiVersion: rbac.authorization.k8s.io/v1beta1
+ kind: Role
+ metadata:
+ annotations:
+ rbac.authorization.kubernetes.io/autoupdate: "true"
+ name: prometheus-metrics-viewer
+ namespace: {{ namespace }}
+ rules:
+ - apiGroups:
+ - metrics.openshift.io
+ resources:
+ - prometheus
+ verbs:
+ - view
+- apiVersion: rbac.authorization.k8s.io/v1beta1
+ kind: RoleBinding
+ metadata:
+ name: prometheus-metrics-viewer
+ namespace: {{ namespace }}
+ roleRef:
+ apiGroup: rbac.authorization.k8s.io
+ kind: Role
+ name: prometheus-metrics-viewer
+ subjects:
+ - kind: ServiceAccount
+ namespace: {{ role_namespace }}
+ name: {{ role_user }}
diff --git a/roles/openshift_logging_elasticsearch/vars/default_images.yml b/roles/openshift_logging_elasticsearch/vars/default_images.yml
new file mode 100644
index 000000000..b7d105caf
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/vars/default_images.yml
@@ -0,0 +1,3 @@
+---
+__openshift_logging_elasticsearch_proxy_image_prefix: "docker.io/openshift/"
+__openshift_logging_elasticsearch_proxy_image_version: "v1.0.0"
diff --git a/roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml b/roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml
new file mode 100644
index 000000000..2fd960bb5
--- /dev/null
+++ b/roles/openshift_logging_elasticsearch/vars/openshift-enterprise.yml
@@ -0,0 +1,3 @@
+---
+__openshift_logging_elasticsearch_proxy_image_prefix: "{{ openshift_logging_image_prefix | default('registry.access.redhat.com/openshift3/') }}"
+__openshift_logging_elasticsearch_proxy_image_version: "v3.7"
diff --git a/roles/openshift_logging_eventrouter/README.md b/roles/openshift_logging_eventrouter/README.md
index da313d68b..611bdaee0 100644
--- a/roles/openshift_logging_eventrouter/README.md
+++ b/roles/openshift_logging_eventrouter/README.md
@@ -3,9 +3,9 @@ Event router
A pod forwarding kubernetes events to the EFK aggregated logging stack.
-- **eventrouter** is deployed to logging project, has a service account and its own role to read events
+- **eventrouter** is deployed to the default project, has a service account and its own role to read events
- **eventrouter** watches kubernetes events, marshalls them to JSON and outputs to its sink, currently only various formatting to STDOUT
-- **fluentd** picks them up and inserts to elasticsearch *.operations* index
+- **fluentd** ingests the **eventrouter** container's output as logs (as it would any other container's), and writes them to the appropriate index for the **eventrouter**'s namespace (in the 'default' namespace, the *.operations* index is used)
- `openshift_logging_install_eventrouter`: When 'True', eventrouter will be installed. When 'False', eventrouter will be uninstalled.
@@ -15,6 +15,6 @@ Configuration variables:
- `openshift_logging_eventrouter_image_version`: The image version for the logging eventrouter. Defaults to 'latest'.
- `openshift_logging_eventrouter_sink`: Select a sink for eventrouter; supported sinks are 'stdout' and 'glog'. Defaults to 'stdout'.
- `openshift_logging_eventrouter_nodeselector`: A map of labels (e.g. {"node":"infra","region":"west"}) to select the nodes where the pod will land.
-- `openshift_logging_eventrouter_cpu_limit`: The amount of CPU to allocate to eventrouter. Defaults to '100m'.
+- `openshift_logging_eventrouter_cpu_request`: The minimum amount of CPU to allocate to eventrouter. Defaults to '100m'.
- `openshift_logging_eventrouter_memory_limit`: The memory limit for eventrouter pods. Defaults to '128Mi'.
- `openshift_logging_eventrouter_namespace`: The namespace where eventrouter is deployed. Defaults to 'default'.
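+
+For example, a minimal inventory snippet (values here are illustrative
+only) that enables the eventrouter and adjusts its resources might look
+like:
+
+```ini
+[OSEv3:vars]
+openshift_logging_install_eventrouter=True
+openshift_logging_eventrouter_sink=stdout
+openshift_logging_eventrouter_cpu_request=100m
+openshift_logging_eventrouter_memory_limit=256Mi
+```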
diff --git a/roles/openshift_logging_eventrouter/defaults/main.yaml b/roles/openshift_logging_eventrouter/defaults/main.yaml
index 34e33f75f..4c0350c98 100644
--- a/roles/openshift_logging_eventrouter/defaults/main.yaml
+++ b/roles/openshift_logging_eventrouter/defaults/main.yaml
@@ -4,6 +4,7 @@ openshift_logging_eventrouter_image_version: "{{ openshift_logging_image_version
openshift_logging_eventrouter_replicas: 1
openshift_logging_eventrouter_sink: stdout
openshift_logging_eventrouter_nodeselector: ""
-openshift_logging_eventrouter_cpu_limit: 100m
+openshift_logging_eventrouter_cpu_limit: null
+openshift_logging_eventrouter_cpu_request: 100m
openshift_logging_eventrouter_memory_limit: 128Mi
openshift_logging_eventrouter_namespace: default
diff --git a/roles/openshift_logging_eventrouter/files/eventrouter-template.yaml b/roles/openshift_logging_eventrouter/files/eventrouter-template.yaml
index 91708e54b..cc01c010d 100644
--- a/roles/openshift_logging_eventrouter/files/eventrouter-template.yaml
+++ b/roles/openshift_logging_eventrouter/files/eventrouter-template.yaml
@@ -56,7 +56,7 @@ objects:
containers:
- name: kube-eventrouter
image: ${IMAGE}
- imagePullPolicy: Always
+ imagePullPolicy: IfNotPresent
resources:
limits:
memory: ${MEMORY}
diff --git a/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml b/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml
index 8df7435e2..cbbc6a8ec 100644
--- a/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml
+++ b/roles/openshift_logging_eventrouter/tasks/install_eventrouter.yaml
@@ -45,7 +45,7 @@
params:
IMAGE: "{{openshift_logging_eventrouter_image_prefix}}logging-eventrouter:{{openshift_logging_eventrouter_image_version}}"
REPLICAS: "{{ openshift_logging_eventrouter_replicas }}"
- CPU: "{{ openshift_logging_eventrouter_cpu_limit }}"
+ CPU: "{{ openshift_logging_eventrouter_cpu_request }}"
MEMORY: "{{ openshift_logging_eventrouter_memory_limit }}"
NAMESPACE: "{{ openshift_logging_eventrouter_namespace }}"
SINK: "{{ openshift_logging_eventrouter_sink }}"
diff --git a/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2 b/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2
index 9ff4c7e80..5a4f7f762 100644
--- a/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2
+++ b/roles/openshift_logging_eventrouter/templates/eventrouter-template.j2
@@ -25,7 +25,7 @@ objects:
metadata:
name: logging-eventrouter
data:
- config.json: |-
+ config.json: |-
{
"sink": "${SINK}"
}
@@ -54,20 +54,20 @@ objects:
serviceAccount: aggregated-logging-eventrouter
serviceAccountName: aggregated-logging-eventrouter
{% if node_selector is iterable and node_selector | length > 0 %}
- nodeSelector:
+ nodeSelector:
{% for key, value in node_selector.iteritems() %}
- {{ key }}: "{{ value }}"
+ {{ key }}: "{{ value }}"
{% endfor %}
{% endif %}
containers:
- name: kube-eventrouter
image: ${IMAGE}
- imagePullPolicy: Always
+ imagePullPolicy: IfNotPresent
resources:
limits:
- memory: ${MEMORY}
- cpu: ${CPU}
+ memory: ${MEMORY}
requests:
+ cpu: ${CPU}
memory: ${MEMORY}
volumeMounts:
- name: config-volume
diff --git a/roles/openshift_logging_fluentd/defaults/main.yml b/roles/openshift_logging_fluentd/defaults/main.yml
index 82326bdd1..861935c99 100644
--- a/roles/openshift_logging_fluentd/defaults/main.yml
+++ b/roles/openshift_logging_fluentd/defaults/main.yml
@@ -8,7 +8,8 @@ openshift_logging_fluentd_namespace: logging
### Common settings
openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nodeselector_label | default('logging-infra-fluentd=true') | map_from_pairs }}"
-openshift_logging_fluentd_cpu_limit: 100m
+openshift_logging_fluentd_cpu_limit: null
+openshift_logging_fluentd_cpu_request: 100m
openshift_logging_fluentd_memory_limit: 512Mi
openshift_logging_fluentd_hosts: ['--all']
@@ -55,4 +56,8 @@ openshift_logging_fluentd_aggregating_passphrase: none
#fluentd_throttle_contents:
#fluentd_secureforward_contents:
-openshift_logging_fluentd_file_buffer_limit: 1Gi
+openshift_logging_fluentd_file_buffer_limit: 256Mi
+
+# Configure fluentd to tail the audit log file and filter out the
+# container engine's logs from there
+# These logs are then stored in the ES operations index
+openshift_logging_fluentd_audit_container_engine: False
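+#
+# Illustrative inventory usage (the file paths below are assumptions shown
+# only to demonstrate the wiring; see the audit_log_file/audit_pos_log_file
+# hooks in tasks/main.yaml):
+#   openshift_logging_fluentd_audit_container_engine=True
+#   openshift_logging_fluentd_audit_file=/var/log/audit/audit.log
+#   openshift_logging_fluentd_audit_pos_file=/var/log/audit/audit.log.pos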
diff --git a/roles/openshift_logging_fluentd/tasks/main.yaml b/roles/openshift_logging_fluentd/tasks/main.yaml
index 37960afd1..2f89c3f9f 100644
--- a/roles/openshift_logging_fluentd/tasks/main.yaml
+++ b/roles/openshift_logging_fluentd/tasks/main.yaml
@@ -108,7 +108,6 @@
src: secure-forward.conf
dest: "{{ tempdir }}/secure-forward.conf"
when: fluentd_secureforward_contents is undefined
-
changed_when: no
- copy:
@@ -155,7 +154,6 @@
path: "{{ generated_certs_dir }}/system.logging.fluentd.crt"
# create Fluentd daemonset
-
# this should change based on the type of fluentd deployment to be done...
# TODO: pass in aggregation configurations
- name: Generate logging-fluentd daemonset definition
@@ -173,6 +171,12 @@
ops_port: "{{ openshift_logging_fluentd_ops_port }}"
fluentd_nodeselector_key: "{{ openshift_logging_fluentd_nodeselector.keys()[0] }}"
fluentd_nodeselector_value: "{{ openshift_logging_fluentd_nodeselector.values()[0] }}"
+ fluentd_cpu_limit: "{{ openshift_logging_fluentd_cpu_limit }}"
+ fluentd_cpu_request: "{{ openshift_logging_fluentd_cpu_request | min_cpu(openshift_logging_fluentd_cpu_limit | default(none)) }}"
+ fluentd_memory_limit: "{{ openshift_logging_fluentd_memory_limit }}"
+ audit_container_engine: "{{ openshift_logging_fluentd_audit_container_engine | default(False) | bool }}"
+ audit_log_file: "{{ openshift_logging_fluentd_audit_file | default() }}"
+ audit_pos_log_file: "{{ openshift_logging_fluentd_audit_pos_file | default() }}"
check_mode: no
changed_when: no
diff --git a/roles/openshift_logging_fluentd/templates/fluentd.j2 b/roles/openshift_logging_fluentd/templates/fluentd.j2
index b5f27b60d..10283316c 100644
--- a/roles/openshift_logging_fluentd/templates/fluentd.j2
+++ b/roles/openshift_logging_fluentd/templates/fluentd.j2
@@ -29,15 +29,30 @@ spec:
containers:
- name: "{{ daemonset_container_name }}"
image: "{{ openshift_logging_fluentd_image_prefix }}{{ daemonset_name }}:{{ openshift_logging_fluentd_image_version }}"
- imagePullPolicy: Always
+ imagePullPolicy: IfNotPresent
securityContext:
privileged: true
+{% if (fluentd_memory_limit is defined and fluentd_memory_limit is not none) or (fluentd_cpu_limit is defined and fluentd_cpu_limit is not none) or (fluentd_cpu_request is defined and fluentd_cpu_request is not none) %}
resources:
+{% if (fluentd_memory_limit is defined and fluentd_memory_limit is not none) or (fluentd_cpu_limit is defined and fluentd_cpu_limit is not none) %}
limits:
- cpu: {{ openshift_logging_fluentd_cpu_limit }}
- memory: {{ openshift_logging_fluentd_memory_limit }}
+{% if fluentd_cpu_limit is not none %}
+ cpu: "{{fluentd_cpu_limit}}"
+{% endif %}
+{% if fluentd_memory_limit is not none %}
+ memory: "{{fluentd_memory_limit}}"
+{% endif %}
+{% endif %}
+{% if (fluentd_memory_limit is defined and fluentd_memory_limit is not none) or (fluentd_cpu_request is defined and fluentd_cpu_request is not none) %}
requests:
- memory: {{ openshift_logging_fluentd_memory_limit }}
+{% if fluentd_cpu_request is not none %}
+ cpu: "{{fluentd_cpu_request}}"
+{% endif %}
+{% if fluentd_memory_limit is not none %}
+ memory: "{{fluentd_memory_limit}}"
+{% endif %}
+{% endif %}
+{% endif %}
volumeMounts:
- name: runlogjournal
mountPath: /run/log/journal
@@ -66,7 +81,9 @@ spec:
readOnly: true
- name: filebufferstorage
mountPath: /var/lib/fluentd
-{% if openshift_logging_mux_client_mode is defined %}
+{% if openshift_logging_mux_client_mode is defined and
+ ((openshift_logging_mux_allow_external is defined and openshift_logging_mux_allow_external | bool) or
+ (openshift_logging_use_mux is defined and openshift_logging_use_mux | bool)) %}
- name: muxcerts
mountPath: /etc/fluent/muxkeys
readOnly: true
@@ -113,8 +130,10 @@ spec:
containerName: "{{ daemonset_container_name }}"
resource: limits.memory
- name: "FILE_BUFFER_LIMIT"
- value: "{{ openshift_logging_fluentd_file_buffer_limit | default('1Gi') }}"
-{% if openshift_logging_mux_client_mode is defined %}
+ value: "{{ openshift_logging_fluentd_file_buffer_limit | default('256i') }}"
+{% if openshift_logging_mux_client_mode is defined and
+ ((openshift_logging_mux_allow_external is defined and openshift_logging_mux_allow_external | bool) or
+ (openshift_logging_use_mux is defined and openshift_logging_use_mux | bool)) %}
- name: "MUX_CLIENT_MODE"
value: "{{ openshift_logging_mux_client_mode }}"
{% endif %}
@@ -168,6 +187,28 @@ spec:
value: "{{ openshift_logging_fluentd_remote_syslog_payload_key }}"
{% endif %}
+{% if audit_container_engine %}
+ - name: "AUDIT_CONTAINER_ENGINE"
+ value: "{{ audit_container_engine | lower }}"
+{% endif %}
+
+{% if audit_container_engine %}
+ - name: "NODE_NAME"
+ valueFrom:
+ fieldRef:
+ fieldPath: spec.nodeName
+{% endif %}
+
+{% if audit_log_file != '' %}
+ - name: AUDIT_FILE
+ value: "{{ audit_log_file }}"
+{% endif %}
+
+{% if audit_pos_log_file != '' %}
+ - name: AUDIT_POS_FILE
+ value: "{{ audit_pos_log_file }}"
+{% endif %}
+
volumes:
- name: runlogjournal
hostPath:
@@ -196,7 +237,9 @@ spec:
- name: dockerdaemoncfg
hostPath:
path: /etc/docker
-{% if openshift_logging_mux_client_mode is defined %}
+{% if openshift_logging_mux_client_mode is defined and
+ ((openshift_logging_mux_allow_external is defined and openshift_logging_mux_allow_external | bool) or
+ (openshift_logging_use_mux is defined and openshift_logging_use_mux | bool)) %}
- name: muxcerts
secret:
secretName: logging-mux
diff --git a/roles/openshift_logging_kibana/defaults/main.yml b/roles/openshift_logging_kibana/defaults/main.yml
index ee265bb14..1366e96cd 100644
--- a/roles/openshift_logging_kibana/defaults/main.yml
+++ b/roles/openshift_logging_kibana/defaults/main.yml
@@ -9,6 +9,7 @@ openshift_logging_kibana_namespace: logging
openshift_logging_kibana_nodeselector: ""
openshift_logging_kibana_cpu_limit: null
+openshift_logging_kibana_cpu_request: 100m
openshift_logging_kibana_memory_limit: 736Mi
openshift_logging_kibana_hostname: "{{ openshift_hosted_logging_hostname | default('kibana.' ~ (openshift_master_default_subdomain | default('router.default.svc.cluster.local', true))) }}"
@@ -28,6 +29,7 @@ openshift_logging_kibana_proxy_image_prefix: "{{ openshift_logging_image_prefix
openshift_logging_kibana_proxy_image_version: "{{ openshift_logging_image_version | default('latest') }}"
openshift_logging_kibana_proxy_debug: false
openshift_logging_kibana_proxy_cpu_limit: null
+openshift_logging_kibana_proxy_cpu_request: 100m
openshift_logging_kibana_proxy_memory_limit: 256Mi
#The absolute path on the control node to the cert file to use
diff --git a/roles/openshift_logging_kibana/tasks/main.yaml b/roles/openshift_logging_kibana/tasks/main.yaml
index e17e8c1f2..8ef8ede9a 100644
--- a/roles/openshift_logging_kibana/tasks/main.yaml
+++ b/roles/openshift_logging_kibana/tasks/main.yaml
@@ -230,8 +230,10 @@
es_host: "{{ openshift_logging_kibana_es_host }}"
es_port: "{{ openshift_logging_kibana_es_port }}"
kibana_cpu_limit: "{{ openshift_logging_kibana_cpu_limit }}"
+ kibana_cpu_request: "{{ openshift_logging_kibana_cpu_request | min_cpu(openshift_logging_kibana_cpu_limit | default(none)) }}"
kibana_memory_limit: "{{ openshift_logging_kibana_memory_limit }}"
kibana_proxy_cpu_limit: "{{ openshift_logging_kibana_proxy_cpu_limit }}"
+ kibana_proxy_cpu_request: "{{ openshift_logging_kibana_proxy_cpu_request | min_cpu(openshift_logging_kibana_proxy_cpu_limit | default(none)) }}"
kibana_proxy_memory_limit: "{{ openshift_logging_kibana_proxy_memory_limit }}"
kibana_replicas: "{{ openshift_logging_kibana_replicas | default (1) }}"
kibana_node_selector: "{{ openshift_logging_kibana_nodeselector | default({}) }}"
diff --git a/roles/openshift_logging_kibana/templates/kibana.j2 b/roles/openshift_logging_kibana/templates/kibana.j2
index da1386d3e..4ff86729a 100644
--- a/roles/openshift_logging_kibana/templates/kibana.j2
+++ b/roles/openshift_logging_kibana/templates/kibana.j2
@@ -37,18 +37,27 @@ spec:
-
name: "kibana"
image: {{ image }}
- imagePullPolicy: Always
-{% if (kibana_memory_limit is defined and kibana_memory_limit is not none and kibana_memory_limit != "") or (kibana_cpu_limit is defined and kibana_cpu_limit is not none and kibana_cpu_limit != "") %}
+ imagePullPolicy: IfNotPresent
+{% if (kibana_memory_limit is defined and kibana_memory_limit is not none and kibana_memory_limit != "") or (kibana_cpu_limit is defined and kibana_cpu_limit is not none and kibana_cpu_limit != "") or (kibana_cpu_request is defined and kibana_cpu_request is not none and kibana_cpu_request != "") %}
resources:
+{% if (kibana_memory_limit is defined and kibana_memory_limit is not none and kibana_memory_limit != "") or (kibana_cpu_limit is defined and kibana_cpu_limit is not none and kibana_cpu_limit != "") %}
limits:
-{% if kibana_cpu_limit is not none and kibana_cpu_limit != "" %}
+{% if kibana_cpu_limit is not none and kibana_cpu_limit != "" %}
cpu: "{{ kibana_cpu_limit }}"
-{% endif %}
-{% if kibana_memory_limit is not none and kibana_memory_limit != "" %}
+{% endif %}
+{% if kibana_memory_limit is not none and kibana_memory_limit != "" %}
memory: "{{ kibana_memory_limit }}"
+{% endif %}
+{% endif %}
+{% if (kibana_memory_limit is defined and kibana_memory_limit is not none and kibana_memory_limit != "") or (kibana_cpu_request is defined and kibana_cpu_request is not none and kibana_cpu_request != "") %}
requests:
+{% if kibana_cpu_request is not none and kibana_cpu_request != "" %}
+ cpu: "{{ kibana_cpu_request }}"
+{% endif %}
+{% if kibana_memory_limit is not none and kibana_memory_limit != "" %}
memory: "{{ kibana_memory_limit }}"
-{% endif %}
+{% endif %}
+{% endif %}
{% endif %}
env:
- name: "ES_HOST"
@@ -75,18 +84,27 @@ spec:
-
name: "kibana-proxy"
image: {{ proxy_image }}
- imagePullPolicy: Always
-{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "") or (kibana_proxy_cpu_limit is defined and kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "") %}
+ imagePullPolicy: IfNotPresent
+{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "") or (kibana_proxy_cpu_limit is defined and kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "") or (kibana_proxy_cpu_request is defined and kibana_proxy_cpu_request is not none and kibana_proxy_cpu_request != "") %}
resources:
+{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "") or (kibana_proxy_cpu_limit is defined and kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "") %}
limits:
-{% if kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "" %}
+{% if kibana_proxy_cpu_limit is not none and kibana_proxy_cpu_limit != "" %}
cpu: "{{ kibana_proxy_cpu_limit }}"
-{% endif %}
-{% if kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "" %}
+{% endif %}
+{% if kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "" %}
memory: "{{ kibana_proxy_memory_limit }}"
+{% endif %}
+{% endif %}
+{% if (kibana_proxy_memory_limit is defined and kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "") or (kibana_proxy_cpu_request is defined and kibana_proxy_cpu_request is not none and kibana_proxy_cpu_request != "") %}
requests:
+{% if kibana_proxy_cpu_request is not none and kibana_proxy_cpu_request != "" %}
+ cpu: "{{ kibana_proxy_cpu_request }}"
+{% endif %}
+{% if kibana_proxy_memory_limit is not none and kibana_proxy_memory_limit != "" %}
memory: "{{ kibana_proxy_memory_limit }}"
-{% endif %}
+{% endif %}
+{% endif %}
{% endif %}
ports:
-
diff --git a/roles/openshift_logging_mux/defaults/main.yml b/roles/openshift_logging_mux/defaults/main.yml
index 68412aec8..9de686576 100644
--- a/roles/openshift_logging_mux/defaults/main.yml
+++ b/roles/openshift_logging_mux/defaults/main.yml
@@ -9,10 +9,11 @@ openshift_logging_mux_namespace: logging
### Common settings
openshift_logging_mux_nodeselector: "{{ openshift_hosted_logging_mux_nodeselector_label | default('') | map_from_pairs }}"
-openshift_logging_mux_cpu_limit: 500m
-openshift_logging_mux_memory_limit: 2Gi
-openshift_logging_mux_buffer_queue_limit: 1024
-openshift_logging_mux_buffer_size_limit: 1m
+openshift_logging_mux_cpu_limit: null
+openshift_logging_mux_cpu_request: 100m
+openshift_logging_mux_memory_limit: 512Mi
+openshift_logging_mux_buffer_queue_limit: 32
+openshift_logging_mux_buffer_size_limit: 8m
openshift_logging_mux_replicas: 1
@@ -57,11 +58,11 @@ openshift_logging_mux_file_buffer_storage_type: "emptydir"
openshift_logging_mux_file_buffer_pvc_name: "logging-mux-pvc"
# required if the PVC does not already exist
-openshift_logging_mux_file_buffer_pvc_size: 4Gi
+openshift_logging_mux_file_buffer_pvc_size: 1Gi
openshift_logging_mux_file_buffer_pvc_dynamic: false
openshift_logging_mux_file_buffer_pvc_pv_selector: {}
openshift_logging_mux_file_buffer_pvc_access_modes: ['ReadWriteOnce']
openshift_logging_mux_file_buffer_storage_group: '65534'
openshift_logging_mux_file_buffer_pvc_prefix: "logging-mux"
-openshift_logging_mux_file_buffer_limit: 2Gi
+openshift_logging_mux_file_buffer_limit: 256Mi
diff --git a/roles/openshift_logging_mux/tasks/main.yaml b/roles/openshift_logging_mux/tasks/main.yaml
index 2ec863afa..5b257139e 100644
--- a/roles/openshift_logging_mux/tasks/main.yaml
+++ b/roles/openshift_logging_mux/tasks/main.yaml
@@ -171,6 +171,7 @@
ops_host: "{{ openshift_logging_mux_ops_host }}"
ops_port: "{{ openshift_logging_mux_ops_port }}"
mux_cpu_limit: "{{ openshift_logging_mux_cpu_limit }}"
+ mux_cpu_request: "{{ openshift_logging_mux_cpu_request | min_cpu(openshift_logging_mux_cpu_limit | default(none)) }}"
mux_memory_limit: "{{ openshift_logging_mux_memory_limit }}"
mux_replicas: "{{ openshift_logging_mux_replicas | default(1) }}"
mux_node_selector: "{{ openshift_logging_mux_nodeselector | default({}) }}"
diff --git a/roles/openshift_logging_mux/templates/mux.j2 b/roles/openshift_logging_mux/templates/mux.j2
index 4cc48139f..cfb13d59b 100644
--- a/roles/openshift_logging_mux/templates/mux.j2
+++ b/roles/openshift_logging_mux/templates/mux.j2
@@ -36,18 +36,27 @@ spec:
containers:
- name: "mux"
image: {{image}}
- imagePullPolicy: Always
-{% if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_limit is defined and mux_cpu_limit is not none) %}
+ imagePullPolicy: IfNotPresent
+{% if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_limit is defined and mux_cpu_limit is not none) or (mux_cpu_request is defined and mux_cpu_request is not none) %}
resources:
+{% if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_limit is defined and mux_cpu_limit is not none) %}
limits:
-{% if mux_cpu_limit is not none %}
+{% if mux_cpu_limit is not none %}
cpu: "{{mux_cpu_limit}}"
-{% endif %}
-{% if mux_memory_limit is not none %}
+{% endif %}
+{% if mux_memory_limit is not none %}
memory: "{{mux_memory_limit}}"
+{% endif %}
+{% endif %}
+{% if (mux_memory_limit is defined and mux_memory_limit is not none) or (mux_cpu_request is defined and mux_cpu_request is not none) %}
requests:
+{% if mux_cpu_request is not none %}
+ cpu: "{{mux_cpu_request}}"
+{% endif %}
+{% if mux_memory_limit is not none %}
memory: "{{mux_memory_limit}}"
-{% endif %}
+{% endif %}
+{% endif %}
{% endif %}
ports:
- containerPort: "{{ openshift_logging_mux_port }}"
diff --git a/roles/openshift_manageiq/tasks/main.yaml b/roles/openshift_manageiq/tasks/main.yaml
index 7789d2232..088d0b171 100644
--- a/roles/openshift_manageiq/tasks/main.yaml
+++ b/roles/openshift_manageiq/tasks/main.yaml
@@ -1,8 +1,4 @@
---
-- fail:
- msg: "The openshift_manageiq role requires OpenShift Enterprise 3.1 or Origin 1.1."
- when: not openshift.common.version_gte_3_1_or_1_1 | bool
-
- name: Add Management Infrastructure project
oc_project:
name: management-infra
@@ -61,4 +57,3 @@
resource_kind: "{{ item.resource_kind }}"
user: "{{ item.user }}"
with_items: "{{manage_iq_openshift_3_2_tasks}}"
- when: openshift.common.version_gte_3_2_or_1_2 | bool
diff --git a/roles/openshift_management/README.md b/roles/openshift_management/README.md
new file mode 100644
index 000000000..05ca27913
--- /dev/null
+++ b/roles/openshift_management/README.md
@@ -0,0 +1,582 @@
+# CloudForms Availability
+
+As noted in [Limitations - Product Choice](#product-choice),
+[CloudForms](https://www.redhat.com/en/technologies/management/cloudforms)
+(CFME) 4.6 is not yet released. Until such time, this role is limited
+to installing [ManageIQ](http://manageiq.org) (MIQ), the open source
+project that CFME is based on.
+
+After CFME 4.6 is available to customers this role will enable
+(optional) logic which will install CFME or MIQ based on your
+deployment type (`openshift_deployment_type`):
+
+* `openshift-enterprise` → CloudForms
+* `origin` → ManageIQ
+
+
+# Table of Contents
+
+ * [Introduction](#introduction)
+ * [Important Notes](#important-notes)
+ * [Requirements](#requirements)
+ * [Role Variables](#role-variables)
+ * [Getting Started](#getting-started)
+ * [All Defaults](#all-defaults)
+ * [External NFS Storage](#external-nfs-storage)
+ * [Override PV sizes](#override-pv-sizes)
+ * [Override Memory Requirements](#override-memory-requirements)
+ * [External PostgreSQL Database](#external-postgresql-database)
+ * [Limitations](#limitations)
+ * [Product Choice](#product-choice)
+ * [Configuration](#configuration)
+ * [Database](#database)
+ * [Podified](#podified)
+ * [External](#external)
+ * [Storage Classes](#storage-classes)
+ * [NFS (Default)](#nfs-default)
+ * [NFS External](#nfs-external)
+ * [Cloud Provider](#cloud-provider)
+ * [Preconfigured (Expert Configuration Only)](#preconfigured-expert-configuration-only)
+ * [Customization](#customization)
+ * [Container Provider](#container-provider)
+ * [Manually](#manually)
+ * [Automatically](#automatically)
+ * [Multiple Providers](#multiple-providers)
+ * [Uninstall](#uninstall)
+ * [Additional Information](#additional-information)
+
+# Introduction
+
+This role allows a user to install CFME 4.6 or MIQ on an OCP 3.7
+cluster. It provides customization options for overriding default
+deployment parameters and supports the following installation flavors:
+
+* **Fully Podified** - In this way all application services are run as
+  pods in the container platform.
+* **External Database** - In this way the application utilizes an
+  externally hosted database server. All other services are run in the
+  container platform.
+
+This role includes the following storage class options:
+
+* NFS - **Default** - local, on cluster
+* NFS External - NFS somewhere else, like a storage appliance
+* Cloud Provider - Use automatic storage provisioning from your cloud
+ provider (*gce* or *aws*)
+* Preconfigured - **expert only**, assumes you created everything ahead
+ of time
+
+You may skip ahead to the [Getting Started](#getting-started) section
+now for examples of how to set up your Ansible inventory for various
+deployment configurations. However, you are **strongly urged** to
+first read through the [Configuration](#configuration) and
+[Customization](#customization) sections as well as the following
+[Important Notes](#important-notes).
+
+## Important Notes
+
+Not all parameters are present in **both** template versions (podified
+db and external db). For example, while the podified database template
+has a `POSTGRESQL_MEM_REQ` parameter, no such parameter is present in
+the external db template, because an externally hosted database runs
+no database pods for the setting to apply to.
+
+*Be extra careful* if you are overriding template
+parameters. Including parameters not defined in a template **will
+cause errors**. If you do receive an error during the `Ensure the CFME
+App is created` task, we recommend running the
+[uninstall scripts](#uninstall) first before running the installer
+again.
+
+
+# Requirements
+
+The **default** requirements are listed in the table below. These can
+be overridden through customization parameters (See
+[Customization](#customization), below).
+
+**Note** that application performance will suffer, or the deployment
+may fail entirely, if these requirements are not satisfied.
+
+
+| Item | Requirement | Description | Customization Parameter |
+|---------------------|---------------|----------------------------------------------|-------------------------------|
+| Application Memory | `≥ 4.0 Gi` | Minimum required memory for the application | `APPLICATION_MEM_REQ` |
+| Application Storage | `≥ 5.0 Gi` | Minimum PV size required for the application | `APPLICATION_VOLUME_CAPACITY` |
+| PostgreSQL Memory | `≥ 6.0 Gi` | Minimum required memory for the database | `POSTGRESQL_MEM_REQ` |
+| PostgreSQL Storage | `≥ 15.0 Gi` | Minimum PV size required for the database | `DATABASE_VOLUME_CAPACITY` |
+| Cluster Hosts | `≥ 3` | Number of hosts in your cluster | |
+
+The implications of this table are summarized below:
+
+* You need several cluster nodes
+* Your cluster nodes must have lots of memory available
+* You will need several GiB of storage available, either locally or
+ on your cloud provider
+* PV sizes can be changed by providing override values to template
+ parameters (see also: [Customization](#customization))
+
+# Role Variables
+
+The following is a table of the publicly exposed variables that may be
+used in your Ansible inventory to control the behavior of this
+installer.
+
+
+| Variable | Required | Default | Description |
+|------------------------------------------------------|:--------:|:------------------------------:|-------------------------------------|
+| `openshift_management_project` | **No** | `openshift-management` | Namespace for the installation. |
+| `openshift_management_project_description` | **No** | *CloudForms Management Engine* | Namespace/project description. |
+| `openshift_management_install_management` | **No** | `false` | Boolean, set to `true` to install the application |
+| `openshift_management_username`                      | **No**   | `admin`                        | Default management username. Changing this value **does not change the username**. Only change this value if you have changed the name already and are running integration scripts (such as the [add container provider](#container-provider) script) |
+| `openshift_management_password`                      | **No**   | `smartvm`                      | Default management password. Changing this value **does not change the password**. Only change this value if you have changed the password already and are running integration scripts (such as the [add-container-provider](#container-provider) script) |
+| **PRODUCT CHOICE** | | | |
+| `openshift_management_app_template` | **No** | `miq-template` | The project flavor to install. Choices: <ul><li>`miq-template`: ManageIQ using a podified database</li> <li> `miq-template-ext-db`: ManageIQ using an external database</li> <li>`cfme-template`: CloudForms using a podified database<sup>[1]</sup></li> <li> `cfme-template-ext-db`: CloudForms using an external database.<sup>[1]</sup></li></ul> |
+| **STORAGE CLASSES** | | | |
+| `openshift_management_storage_class` | **No** | `nfs` | Storage type to use, choices: <ul><li>`nfs` - Best used for proof-of-concept installs. Will setup NFS on a cluster host (defaults to your first master in the inventory file) to back the required PVCs. The application requires a PVC and the database (which may be hosted externally) may require a second. PVC minimum required sizes are 5GiB for the MIQ application, and 15GiB for the PostgreSQL database (20GiB minimum available space on a volume/partition if used specifically for NFS purposes)</li> <li>`nfs_external` - You are using an external NFS server, such as a netapp appliance. See the [Configuration - Storage Classes](#storage-classes) section below for required information.</li> <li>`preconfigured` - This CFME role will do NOTHING to modify storage settings. This option assumes expert knowledge and that you have done everything required ahead of time.</li> <li>`cloudprovider` - You are using an OCP cloudprovider integration for your storage class. For this to work you must have already configured the required inventory parameters for your cloud provider. Ensure `openshift_cloudprovider_kind` is defined (aws or gce) and that the applicable cloudprovider parameters are provided. |
+| `openshift_management_storage_nfs_external_hostname` | **No**   | `false`                        | If you are using an *external NFS server*, such as a netapp appliance, then you must set the hostname here. Leave the value as `false` if you are not using external NFS. <br /> *Additionally*: **External NFS REQUIRES** that you create the NFS exports that will back the application PV and optionally the database PV. |
+| `openshift_management_storage_nfs_base_dir` | **No** | `/exports/` | If you are using **External NFS** then you may set the base path to the exports location here. <br />**Local NFS Note**: You *may* also change this value if you want to change the default path used for local NFS exports. |
+| `openshift_management_storage_nfs_local_hostname` | **No** | `false` | If you do not have an `[nfs]` group in your inventory, or want to simply manually define the local NFS host in your cluster, set this parameter to the hostname of the preferred NFS server. The server must be a part of your OCP/Origin cluster. |
+| **CUSTOMIZATION OPTIONS** | | | |
+| `openshift_management_template_parameters`           | **No**   | `{}`                           | A dictionary of any parameters you want to override in the application/PV templates. |
+
+* <sup>[1]</sup> The `cfme-template`s will be available and
+ automatically detected once CFME 4.6 is released
+
+
+# Getting Started
+
+Below are some inventory snippets that can help you get started right
+away.
+
+If you want to install CFME/MIQ at the same time you install your
+OCP/Origin cluster, ensure that `openshift_management_install_management` is set
+to `true` in your inventory. Call the standard
+`playbooks/byo/config.yml` playbook to begin the cluster and CFME/MIQ
+installation.
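+
+A minimal inventory snippet for that combined install (a sketch; it
+assumes the rest of your cluster inventory is already in place):
+
+```ini
+[OSEv3:vars]
+openshift_management_install_management=true
+```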
+
+If you are installing CFME/MIQ on an *already provisioned cluster*
+then you can call the CFME/MIQ playbook directly:
+
+```
+$ ansible-playbook -v -i <YOUR_INVENTORY> playbooks/byo/openshift-management/config.yml
+```
+
+*Note: Use `miq-template` in the following examples for ManageIQ installs*
+
+## All Defaults
+
+This example is the simplest. All of the default values and choices
+are used. This will result in a fully podified CFME installation. All
+application components, as well as the PostgreSQL database, will be
+created as pods in the container platform.
+
+```ini
+[OSEv3:vars]
+openshift_management_app_template=cfme-template
+```
+
+## External NFS Storage
+
+This is the same as the previous example, except that instead of using local
+NFS services in the cluster it will use an external NFS server (such
+as a storage appliance). Note the two new parameters:
+
+* `openshift_management_storage_class` - set to `nfs_external`
+* `openshift_management_storage_nfs_external_hostname` - set to the hostname
+ of the NFS server
+
+```ini
+[OSEv3:vars]
+openshift_management_app_template=cfme-template
+openshift_management_storage_class=nfs_external
+openshift_management_storage_nfs_external_hostname=nfs.example.com
+```
+
+If the external NFS host exports directories under a different parent
+directory, such as `/exports/hosted/prod`, then we would add an
+additional parameter, `openshift_management_storage_nfs_base_dir`:
+
+```ini
+# ...
+openshift_management_storage_nfs_base_dir=/exports/hosted/prod
+```
+
+## Override PV sizes
+
+This example will override the PV sizes. Note that we set the PV sizes
+in the template parameters, `openshift_management_template_parameters`. This
+ensures that the application/db will be able to make claims on created
+PVs without clobbering each other.
+
+```ini
+[OSEv3:vars]
+openshift_management_app_template=cfme-template
+openshift_management_template_parameters={'APPLICATION_VOLUME_CAPACITY': '10Gi', 'DATABASE_VOLUME_CAPACITY': '25Gi'}
+```
+
+## Override Memory Requirements
+
+In a test or proof-of-concept installation you may need to reduce the
+application/database memory requirements to fit within your
+capacity. Note that reducing memory limits can result in reduced
+performance or a complete failure to initialize the application.
+
+```ini
+[OSEv3:vars]
+openshift_management_app_template=cfme-template
+openshift_management_template_parameters={'APPLICATION_MEM_REQ': '3000Mi', 'POSTGRESQL_MEM_REQ': '1Gi', 'ANSIBLE_MEM_REQ': '512Mi'}
+```
+
+Here we have instructed the installer to process the application
+template with the parameter `APPLICATION_MEM_REQ` set to `3000Mi`,
+`POSTGRESQL_MEM_REQ` set to `1Gi`, and `ANSIBLE_MEM_REQ` set to
+`512Mi`.
+
+These parameters can be combined with the PV size override parameters
+displayed in the previous example.
+
+## External PostgreSQL Database
+
+To use an external database you must change the
+`openshift_management_app_template` parameter value to `miq-template-ext-db`
+or `cfme-template-ext-db`.
+
+Additionally, database connection information **must** be supplied in
+the `openshift_management_template_parameters` customization parameter. See
+[Customization - Database - External](#external) for more
+information.
+
+```ini
+[OSEv3:vars]
+openshift_management_app_template=cfme-template-ext-db
+openshift_management_template_parameters={'DATABASE_USER': 'root', 'DATABASE_PASSWORD': 'r1ck&M0r7y', 'DATABASE_IP': '10.10.10.10', 'DATABASE_PORT': '5432', 'DATABASE_NAME': 'cfme'}
+```
+
+**NOTE:** Ensure you are running PostgreSQL 9.5 or you may not be
+able to deploy the app successfully.
+
+# Limitations
+
+This release is the first OpenShift CFME release in the OCP 3.7
+series. It is not complete yet.
+
+## Product Choice
+
+Due to staggered release dates, **CFME support is not
+integrated**. Presently this role will only deploy a ManageIQ
+installation. This role will be updated once CFME 4.6 is released and
+this limitation note will be removed.
+
+# Configuration
+
+Before you can deploy CFME you must decide *how* you want to deploy
+it. There are two major decisions to make:
+
+1. Do you want an external, or a podified database?
+1. Which storage class will back your PVs?
+
+## Database
+
+### Podified
+
+Any `POSTGRES_*` or `DATABASE_*` template parameters in
+[miq-template.yaml](files/templates/manageiq/miq-template.yaml) or
+[cfme-template.yaml](files/templates/cloudforms/cfme-template.yaml)
+may be customized through the `openshift_management_template_parameters`
+hash.
+
+### External
+
+Any `POSTGRES_*` or `DATABASE_*` template parameters in
+[miq-template-ext-db.yaml](files/templates/manageiq/miq-template-ext-db.yaml)
+or
+[cfme-template-ext-db.yaml](files/templates/cloudforms/cfme-template-ext-db.yaml)
+may be customized through the `openshift_management_template_parameters`
+hash.
+
+External PostgreSQL databases require you to provide database
+connection parameters. You must set the required connection keys in
+the `openshift_management_template_parameters` parameter in your
+inventory. The following keys are required:
+
+* `DATABASE_USER`
+* `DATABASE_PASSWORD`
+* `DATABASE_IP`
+* `DATABASE_PORT` - *note: Most PostgreSQL servers run on port `5432`*
+* `DATABASE_NAME`
+
+**NOTE:** Ensure you are running PostgreSQL 9.5 or you may not be
+able to deploy the app successfully.
+
+Your inventory would contain a line similar to this:
+
+```ini
+[OSEv3:vars]
+openshift_management_app_template=cfme-template-ext-db
+openshift_management_template_parameters={'DATABASE_USER': 'root', 'DATABASE_PASSWORD': 'r1ck&M0r7y', 'DATABASE_IP': '10.10.10.10', 'DATABASE_PORT': '5432', 'DATABASE_NAME': 'cfme'}
+```
+
+**Note** the new value for the `openshift_management_app_template`
+parameter, `cfme-template-ext-db` (ManageIQ installations would use
+`miq-template-ext-db` instead).
+
+At run time you may run into errors similar to this:
+
+```
+TASK [openshift_management : Ensure the CFME App is created] ***********************************
+task path: /home/tbielawa/rhat/os/openshift-ansible/roles/openshift_management/tasks/main.yml:74
+Tuesday 03 October 2017 15:30:44 -0400 (0:00:00.056) 0:00:12.278 *******
+{"cmd": "/usr/bin/oc create -f /tmp/postgresql-ZPEWQS -n openshift-management", "kind": "Endpoints", "results": {}, "returncode": 1, "stderr": "Error from server (BadRequest): error when creating \"/tmp/postgresql-ZPEWQS\": Endpoints in version \"v1\" cannot be handled as a Endpoints: [pos 218]: json: decNum: got first char 'f'\n", "stdout": ""}
+```
+
+Or like this:
+
+```
+TASK [openshift_management : Ensure the CFME App is created] ***********************************
+task path: /home/tbielawa/rhat/os/openshift-ansible/roles/openshift_management/tasks/main.yml:74
+Tuesday 03 October 2017 16:05:36 -0400 (0:00:00.052) 0:00:18.948 *******
+fatal: [m01.example.com]: FAILED! => {"changed": true, "failed": true, "msg":
+{"cmd": "/usr/bin/oc create -f /tmp/postgresql-igS5sx -n openshift-management", "kind": "Endpoints", "results": {}, "returncode": 1, "stderr": "The Endpoints \"postgresql\" is invalid: subsets[0].addresses[0].ip: Invalid value: \"doo\": must be a valid IP address, (e.g. 10.9.8.7)\n", "stdout": ""},
+```
+
+While intimidating at first, these errors contain useful bits of
+information. Examine the error output closely and you can tell exactly
+what is wrong.
+
+In the first example we see `Endpoints in version \"v1\" cannot be
+handled as a Endpoints: [pos 218]: json: decNum: got first char
+...`. This is because in my example I used the value `foo` for the
+parameter `DATABASE_PORT`.
+
+In the second example we see `The Endpoints \"postgresql\" is invalid:
+subsets[0].addresses[0].ip: Invalid value: \"doo\": must be a valid IP
+address ...`. This is because in my example I used the value `doo` in
+the `DATABASE_IP` field.
+
+Luckily for us, when the templates are processed behind the scenes they
+also run type-checking validation. So don't worry: just look closely at
+the errors and ensure you are providing the correct values for each
+parameter.
+
+## Storage Classes
+
+OpenShift CFME supports several storage class options.
+
+### NFS (Default)
+
+The NFS storage class is best suited for proof-of-concept and
+test/demo deployments. It is also the **default** storage class for
+deployments. No additional configuration is required for this
+choice.
+
+Customization is provided through the following role variables:
+
+* `openshift_management_storage_nfs_base_dir`
+* `openshift_management_storage_nfs_local_hostname`
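+
+For example, to pin the local NFS server and move the export base path
+(the hostname and path here are illustrative):
+
+```ini
+[OSEv3:vars]
+openshift_management_storage_nfs_local_hostname=nfs01.example.com
+openshift_management_storage_nfs_base_dir=/exports/management
+```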
+
+### NFS External
+
+External NFS leans on pre-configured NFS servers to provide exports
+for the required PVs. For external NFS you must have:
+
+* For CFME: a `cfme-app` export and, optionally, a `cfme-db` export (for a podified database)
+* For ManageIQ: an `miq-app` export and, optionally, an `miq-db` export (for a podified database)
+
+Configuration is provided through the following role variables:
+
+* `openshift_management_storage_nfs_external_hostname`
+* `openshift_management_storage_nfs_base_dir`
+
+The `openshift_management_storage_nfs_external_hostname` parameter must be
+set to the hostname or IP of your external NFS server.
+
+If `/exports` is not the parent directory to your exports then you
+must set the base directory via the
+`openshift_management_storage_nfs_base_dir` parameter.
+
+For example, if your server export is `/exports/hosted/prod/cfme-app`
+then you must set
+`openshift_management_storage_nfs_base_dir=/exports/hosted/prod`.
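+
+As a sketch, the matching exports on the NFS server (paths and export
+options here are illustrative, not prescriptive) could look like:
+
+```
+# /etc/exports on nfs.example.com
+/exports/hosted/prod/cfme-app *(rw,no_root_squash)
+/exports/hosted/prod/cfme-db  *(rw,no_root_squash)
+```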
+
+### Cloud Provider
+
+CFME can also use a cloud provider storage to back required PVs. For
+this functionality to work you must have also configured the
+`openshift_cloudprovider_kind` variable and all associated parameters
+specific to your chosen cloud provider.
+
+With this storage class, the PVs required by the application are
+provisioned automatically at creation time through the configured
+cloud provider storage integration.
+
+There are no additional variables to configure the behavior of this
+storage class.
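+
+Selecting this class is a single inventory switch on top of your
+existing cloud provider settings (sketch, assuming AWS):
+
+```ini
+[OSEv3:vars]
+openshift_cloudprovider_kind=aws
+openshift_management_storage_class=cloudprovider
+```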
+
+### Preconfigured (Expert Configuration Only)
+
+The *preconfigured* storage class implies that you know exactly what
+you're doing and that all storage requirements have been taken care of
+ahead of time. Typically this means that you've already created the
+correctly sized PVs.
+
+There are no additional variables to configure the behavior of this
+storage class.
+
+# Customization
+
+Application and database parameters may be customized by means of the
+`openshift_management_template_parameters` inventory parameter.
+
+**For example**, if you wanted to reduce the memory requirement of the
+PostgreSQL pod then you could configure the parameter like this:
+
+`openshift_management_template_parameters={'POSTGRESQL_MEM_REQ': '1Gi'}`
+
+When the CFME template is processed `1Gi` will be used for the value
+of the `POSTGRESQL_MEM_REQ` template parameter.
+
+Any parameter in the `parameters` section of the
+[miq-template.yaml](files/templates/manageiq/miq-template.yaml) or
+[miq-template-ext-db.yaml](files/templates/manageiq/miq-template-ext-db.yaml)
+may be overridden through the `openshift_management_template_parameters`
+hash. This applies to **CloudForms** installations as well:
+[cfme-template.yaml](files/templates/cloudforms/cfme-template.yaml),
+[cfme-template-ext-db.yaml](files/templates/cloudforms/cfme-template-ext-db.yaml).
+
+# Container Provider
+
+There are two methods for enabling container provider integration. You
+can manually add OCP/Origin as a container provider, or you can try
+the playbooks included with this role.
+
+## Manually
+
+See the online documentation for steps to manually add your cluster as
+a container provider:
+
+* [Container Providers](http://manageiq.org/docs/reference/latest/doc-Managing_Providers/miq/#containers-providers)
+
+## Automatically
+
+Automated container provider integration can be accomplished using the
+playbooks included with this role.
+
+This playbook will:
+
+1. Gather the necessary authentication secrets
+1. Find the public routes to the Management app and the cluster API
+1. Make a REST call to add this cluster as a container provider
+
+
+```
+$ ansible-playbook -v -i <YOUR_INVENTORY> playbooks/byo/openshift-management/add_container_provider.yml
+```
+
+## Multiple Providers
+
+As well as providing playbooks to integrate your *current* container
+platform into the management service, this role includes a **tech
+preview** script which allows you to add multiple container platforms
+as container providers in any arbitrary MIQ/CFME server.
+
+Using the multiple-provider script requires manual configuration and
+setting an `EXTRA_VARS` parameter on the command-line.
+
+
+1. Copy the
+ [container_providers.yml](files/examples/container_providers.yml)
+ example somewhere, such as `/tmp/cp.yml`
+1. If you changed your CFME/MIQ name or password, update the
+ `hostname`, `user`, and `password` parameters in the
+ `management_server` key in the `container_providers.yml` file copy
+1. Fill in an entry under the `container_providers` key for *each* OCP
+ or Origin cluster you want to add as container providers
+
+**Parameters Which MUST Be Configured:**
+
+* `auth_key` - This is the token of a service account which has admin capabilities on the cluster.
+* `hostname` - This is the hostname that points to the cluster API. Each container provider must have a unique hostname.
+* `name` - This is the name of the cluster as displayed in the management server container providers overview. This must be unique.
+
+*Note*: You can obtain the `auth_key` bearer token from your clusters
+ with this command: `oc serviceaccounts get-token -n management-infra
+ management-admin`
+
+**Parameters Which MAY Be Configured:**
+
+* `port` - Update this key if your OCP/Origin cluster runs the API on a port other than `8443`
+* `endpoint` - You may enable SSL verification (`verify_ssl`) or change the validation setting to `ssl-with-validation`. Support for custom trusted CA certificates is not available at this time.
+
+
+Let's see an example describing the following scenario:
+
+* You copied `files/examples/container_providers.yml` to `/tmp/cp.yml`
+* You're adding two OCP clusters
+* Your management server runs on `mgmt.example.com`
+
+You would customize `/tmp/cp.yml` as such:
+
+```yaml
+---
+container_providers:
+ - connection_configurations:
+ - authentication: {auth_key: "management-token-for-this-cluster", authtype: bearer, type: AuthToken}
+ endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0}
+ hostname: "ocp-prod.example.com"
+ name: OCP Production
+ port: 8443
+ type: "ManageIQ::Providers::Openshift::ContainerManager"
+ - connection_configurations:
+ - authentication: {auth_key: "management-token-for-this-cluster", authtype: bearer, type: AuthToken}
+ endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0}
+ hostname: "ocp-test.example.com"
+ name: OCP Testing
+ port: 8443
+ type: "ManageIQ::Providers::Openshift::ContainerManager"
+management_server:
+ hostname: "mgmt.example.com"
+ user: admin
+ password: b3tt3r_p4SSw0rd
+```
+
+Then you will run the many-container-providers integration script. You
+**must** provide the path to the container providers configuration
+file as an `EXTRA_VARS` parameter to `ansible-playbook`. Use the `-e`
+(or `--extra-vars`) parameter to set `container_providers_config` to
+the config file path.
+
+```
+$ ansible-playbook -v -e container_providers_config=/tmp/cp.yml \
+ playbooks/byo/openshift-management/add_many_container_providers.yml
+```
+
+Afterwards you will find two new container providers in your
+management service. Navigate to `Compute` → `Containers` → `Providers`
+to see an overview.
+
+# Uninstall
+
+This role includes a playbook to uninstall and erase the CFME/MIQ
+installation:
+
+* `playbooks/byo/openshift-management/uninstall.yml`
+
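+It is invoked like the other playbooks in this role (sketch):
+
+```
+$ ansible-playbook -v -i <YOUR_INVENTORY> playbooks/byo/openshift-management/uninstall.yml
+```
+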
+NFS export definitions and data stored on NFS exports are not
+automatically removed. You are urged to manually erase any data from
+old application or database deployments before attempting to
+initialize a new deployment.
+
+# Additional Information
+
+The upstream project,
+[@manageiq/manageiq-pods](https://github.com/ManageIQ/manageiq-pods),
+contains a wealth of additional information useful for managing and
+operating your CFME installation. Topics include:
+
+* [Verifying Successful Installation](https://github.com/ManageIQ/manageiq-pods#verifying-the-setup-was-successful)
+* [Disabling Image Change Triggers](https://github.com/ManageIQ/manageiq-pods#disable-image-change-triggers)
+* [Scaling CFME](https://github.com/ManageIQ/manageiq-pods#scale-miq)
+* [Backing up and Restoring the DB](https://github.com/ManageIQ/manageiq-pods#backup-and-restore-of-the-miq-database)
+* [Troubleshooting](https://github.com/ManageIQ/manageiq-pods#troubleshooting)
diff --git a/roles/openshift_management/defaults/main.yml b/roles/openshift_management/defaults/main.yml
new file mode 100644
index 000000000..8ba65b386
--- /dev/null
+++ b/roles/openshift_management/defaults/main.yml
@@ -0,0 +1,104 @@
+---
+# Namespace for the CFME project
+openshift_management_project: openshift-management
+# Namespace/project description
+openshift_management_project_description: CloudForms Management Engine
+
+######################################################################
+# BASE TEMPLATE AND DATABASE OPTIONS
+######################################################################
+# Which flavor of CFME would you like? You may install CFME using a
+# podified PostgreSQL server, or you may choose to use an existing
+# PostgreSQL server.
+#
+# Choose 'miq-template' for a podified database install
+# Choose 'miq-template-ext-db' for an external database install
+openshift_management_app_template: miq-template
+# If you are using the miq-template-ext-db template then you must add
+# the required database parameters to the
+# openshift_management_template_parameters variable.
+
+######################################################################
+# STORAGE OPTIONS
+######################################################################
+# DEFAULT - 'nfs'
+# Allowed options: nfs, nfs_external, preconfigured, cloudprovider.
+openshift_management_storage_class: nfs
+# * nfs - Best used for proof-of-concept installs. Will set up NFS on a
+# cluster host (defaults to your first master in the inventory file)
+# to back the required PVCs. The application requires a PVC and the
+# database (which may be hosted externally) may require a
+# second. PVC minimum required sizes are: 5GiB for the MIQ
+# application, and 15GiB for the PostgreSQL database (20GiB minimum
+#   available space on a volume/partition if used specifically for
+# NFS purposes)
+#
+# * nfs_external - You are using an external NFS server, such as a
+# netapp appliance. See the STORAGE - NFS OPTIONS section below for
+# required information.
+#
+# * preconfigured - This CFME role will do NOTHING to modify storage
+# settings. This option assumes expert knowledge and that you have
+# done everything required ahead of time.
+#
+# * cloudprovider - You are using an OCP cloudprovider integration for
+# your storage class. For this to work you must have already
+# configured the required inventory parameters for your cloud
+# provider
+#
+# Ensure 'openshift_cloudprovider_kind' is defined (aws or gce) and
+# that the applicable cloudprovider parameters are provided.
+
+#---------------------------------------------------------------------
+# STORAGE - NFS OPTIONS
+#---------------------------------------------------------------------
+# [OPTIONAL] - If you are using an EXTERNAL NFS server, such as a
+# netapp appliance, then you must set the hostname here. Leave the
+# value as 'false' if you are not using external NFS.
+openshift_management_storage_nfs_external_hostname: false
+# [OPTIONAL] - If you are using external NFS then you must set the base
+# path to the exports location here.
+#
+# Additionally: EXTERNAL NFS REQUIRES that YOU CREATE the nfs exports
+# that will back the application PV and optionally the database
+# PV. Export paths are defined relative to
+# {{ openshift_management_storage_nfs_base_dir }}
+#
+# LOCAL NFS NOTE:
+#
+# You may also change this value if you want to change the default
+# path used for local NFS exports.
+openshift_management_storage_nfs_base_dir: /exports
+#
+# LOCAL NFS NOTE:
+#
+# You may override the automatically selected LOCAL NFS server by
+# setting this variable. Useful for testing specific task files.
+openshift_management_storage_nfs_local_hostname: false
+
+######################################################################
+# DEFAULT ACCOUNT INFORMATION
+######################################################################
+# These are the default values for the username and password of the
+# management app. Changing these values in your inventory will not
+# change the actual username or password in the application. You only
+# need to change these values in your inventory if you have already
+# changed the real name and password AND are trying to use the
+# integration scripts.
+#
+# For example, adding this cluster as a container provider,
+# playbooks/byo/openshift-management/add_container_provider.yml
+openshift_management_username: admin
+openshift_management_password: smartvm
+
+######################################################################
+# SCAFFOLDING - These are parameters we pre-seed that a user may or
+# may not set later
+######################################################################
+# A hash of parameters you want to override or set in the
+# miq-template.yaml or miq-template-ext-db.yaml templates. Set this in
+# your inventory file as a simple hash. Acceptable values are defined
+# under the .parameters list in files/miq-template{-ext-db}.yaml.
+# Example:
+#
+# openshift_management_template_parameters={'APPLICATION_MEM_REQ': '512Mi'}
+openshift_management_template_parameters: {}
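+#
+# The same idea written as YAML in a vars file (values illustrative;
+# both keys are defined under .parameters in the shipped templates):
+#
+# openshift_management_template_parameters:
+#   APPLICATION_MEM_REQ: 512Mi
+#   POSTGRESQL_MEM_REQ: 1Gi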
diff --git a/roles/openshift_management/files/examples/container_providers.yml b/roles/openshift_management/files/examples/container_providers.yml
new file mode 100644
index 000000000..661f62e4d
--- /dev/null
+++ b/roles/openshift_management/files/examples/container_providers.yml
@@ -0,0 +1,22 @@
+---
+container_providers:
+ - connection_configurations:
+ - authentication: {auth_key: "management-admin-token-here", authtype: bearer, type: AuthToken}
+ endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0}
+ hostname: "OCP/Origin cluster hostname (providing API access)"
+ name: openshift-management
+ port: 8443
+ type: "ManageIQ::Providers::Openshift::ContainerManager"
+# Copy and update the commented block below for as many OCP or Origin
+# providers as you want to add to your management service.
+ # - connection_configurations:
+ # - authentication: {auth_key: "management-admin-token-here", authtype: bearer, type: AuthToken}
+ # endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0}
+ # hostname: "OCP/Origin cluster hostname (providing API access)"
+ # name: openshift-management
+ # port: 8443
+ # type: "ManageIQ::Providers::Openshift::ContainerManager"
+management_server:
+ hostname: "Management server hostname (providing API access)"
+ user: admin
+ password: smartvm
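+# Usage sketch: the auth_key above is the bearer token of a service
+# account on the target cluster. On OCP 3.x the management-admin token
+# can typically be read with something like the following (command is
+# an assumption -- verify against your oc client):
+#
+#   oc sa get-token management-admin -n management-infra
+#
+# Then feed this file to the many-providers playbook, e.g.:
+#
+#   ansible-playbook -i <inventory> \
+#     playbooks/byo/openshift-management/add_many_container_providers.yml \
+#     -e container_providers_config=/path/to/container_providers.yml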
diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-backup-job.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-backup-job.yaml
new file mode 100644
index 000000000..c3bc1d20c
--- /dev/null
+++ b/roles/openshift_management/files/templates/cloudforms/cfme-backup-job.yaml
@@ -0,0 +1,28 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: cloudforms-backup
+spec:
+ template:
+ metadata:
+ name: cloudforms-backup
+ spec:
+ containers:
+ - name: postgresql
+ image: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-postgresql:latest
+ command:
+ - "/opt/rh/cfme-container-scripts/backup_db"
+ env:
+ - name: DATABASE_URL
+ valueFrom:
+ secretKeyRef:
+ name: cloudforms-secrets
+ key: database-url
+ volumeMounts:
+ - name: cfme-backup-vol
+ mountPath: "/backups"
+ volumes:
+ - name: cfme-backup-vol
+ persistentVolumeClaim:
+ claimName: cloudforms-backup
+ restartPolicy: Never
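+# Usage sketch (assumes the 'cloudforms-secrets' secret created by the
+# CFME template already exists in the current project):
+#
+#   oc create -f cfme-backup-pvc.yaml
+#   oc create -f cfme-backup-job.yaml
+#
+# Dumps land on the 'cloudforms-backup' PVC mounted at /backups; the
+# restore job below selects one via BACKUP_VERSION.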
diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-backup-pvc.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-backup-pvc.yaml
new file mode 100644
index 000000000..92598ce82
--- /dev/null
+++ b/roles/openshift_management/files/templates/cloudforms/cfme-backup-pvc.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+ name: cloudforms-backup
+spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: 15Gi
diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-pv-backup-example.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-pv-backup-example.yaml
new file mode 100644
index 000000000..4fe349897
--- /dev/null
+++ b/roles/openshift_management/files/templates/cloudforms/cfme-pv-backup-example.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+ name: cfme-pv03
+spec:
+ capacity:
+ storage: 15Gi
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: "/exports/cfme-pv03"
+ server: "<your-nfs-host-here>"
+ persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-pv-db-example.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-pv-db-example.yaml
new file mode 100644
index 000000000..0cdd821b5
--- /dev/null
+++ b/roles/openshift_management/files/templates/cloudforms/cfme-pv-db-example.yaml
@@ -0,0 +1,38 @@
+apiVersion: v1
+kind: Template
+labels:
+ template: cloudforms-db-pv
+metadata:
+ name: cloudforms-db-pv
+ annotations:
+ description: PV Template for CFME PostgreSQL DB
+ tags: PVS, CFME
+objects:
+- apiVersion: v1
+ kind: PersistentVolume
+ metadata:
+ name: cfme-db
+ spec:
+ capacity:
+ storage: "${PV_SIZE}"
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: "${BASE_PATH}/cfme-db"
+ server: "${NFS_HOST}"
+ persistentVolumeReclaimPolicy: Retain
+parameters:
+- name: PV_SIZE
+ displayName: PV Size for DB
+ required: true
+ description: The size of the CFME DB PV given in Gi
+ value: 15Gi
+- name: BASE_PATH
+ displayName: Exports Directory Base Path
+ required: true
+ description: The parent directory of your NFS exports
+ value: "/exports"
+- name: NFS_HOST
+ displayName: NFS Server Hostname
+ required: true
+ description: The hostname or IP address of the NFS server
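+# Usage sketch (the NFS host below is a placeholder):
+#
+#   oc process -f cfme-pv-db-example.yaml \
+#     -p NFS_HOST=nfs.example.com -p PV_SIZE=15Gi | oc create -f -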
diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-pv-server-example.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-pv-server-example.yaml
new file mode 100644
index 000000000..527090ae8
--- /dev/null
+++ b/roles/openshift_management/files/templates/cloudforms/cfme-pv-server-example.yaml
@@ -0,0 +1,38 @@
+apiVersion: v1
+kind: Template
+labels:
+ template: cloudforms-app-pv
+metadata:
+ name: cloudforms-app-pv
+ annotations:
+ description: PV Template for CFME Server
+ tags: PVS, CFME
+objects:
+- apiVersion: v1
+ kind: PersistentVolume
+ metadata:
+ name: cfme-app
+ spec:
+ capacity:
+ storage: "${PV_SIZE}"
+ accessModes:
+ - ReadWriteOnce
+ nfs:
+ path: "${BASE_PATH}/cfme-app"
+ server: "${NFS_HOST}"
+ persistentVolumeReclaimPolicy: Retain
+parameters:
+- name: PV_SIZE
+ displayName: PV Size for App
+ required: true
+ description: The size of the CFME APP PV given in Gi
+ value: 5Gi
+- name: BASE_PATH
+ displayName: Exports Directory Base Path
+ required: true
+ description: The parent directory of your NFS exports
+ value: "/exports"
+- name: NFS_HOST
+ displayName: NFS Server Hostname
+ required: true
+ description: The hostname or IP address of the NFS server
diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-restore-job.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-restore-job.yaml
new file mode 100644
index 000000000..8b23f8a33
--- /dev/null
+++ b/roles/openshift_management/files/templates/cloudforms/cfme-restore-job.yaml
@@ -0,0 +1,35 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+ name: cloudforms-restore
+spec:
+ template:
+ metadata:
+ name: cloudforms-restore
+ spec:
+ containers:
+ - name: postgresql
+ image: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-postgresql:latest
+ command:
+ - "/opt/rh/cfme-container-scripts/restore_db"
+ env:
+ - name: DATABASE_URL
+ valueFrom:
+ secretKeyRef:
+ name: cloudforms-secrets
+ key: database-url
+ - name: BACKUP_VERSION
+ value: latest
+ volumeMounts:
+ - name: cfme-backup-vol
+ mountPath: "/backups"
+ - name: cfme-prod-vol
+ mountPath: "/restore"
+ volumes:
+ - name: cfme-backup-vol
+ persistentVolumeClaim:
+ claimName: cloudforms-backup
+ - name: cfme-prod-vol
+ persistentVolumeClaim:
+ claimName: cloudforms-postgresql
+ restartPolicy: Never
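+# Restore sketch: scale the CloudForms StatefulSets down to 0 first so
+# nothing writes to the database during the restore, then:
+#
+#   oc create -f cfme-restore-job.yaml
+#
+# BACKUP_VERSION defaults to 'latest'; point it at a specific backup
+# name to restore an older dump.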
diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-scc-sysadmin.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-scc-sysadmin.yaml
new file mode 100644
index 000000000..d2ece9298
--- /dev/null
+++ b/roles/openshift_management/files/templates/cloudforms/cfme-scc-sysadmin.yaml
@@ -0,0 +1,38 @@
+allowHostDirVolumePlugin: false
+allowHostIPC: false
+allowHostNetwork: false
+allowHostPID: false
+allowHostPorts: false
+allowPrivilegedContainer: false
+allowedCapabilities:
+apiVersion: v1
+defaultAddCapabilities:
+- SYS_ADMIN
+fsGroup:
+ type: RunAsAny
+groups:
+- system:cluster-admins
+kind: SecurityContextConstraints
+metadata:
+ annotations:
+ kubernetes.io/description: cfme-sysadmin provides all features of the anyuid SCC but allows users to have SYS_ADMIN capabilities. This is the required scc for Pods requiring to run with systemd and the message bus.
+ creationTimestamp:
+ name: cfme-sysadmin
+priority: 10
+readOnlyRootFilesystem: false
+requiredDropCapabilities:
+- MKNOD
+- SYS_CHROOT
+runAsUser:
+ type: RunAsAny
+seLinuxContext:
+ type: MustRunAs
+supplementalGroups:
+ type: RunAsAny
+users:
+volumes:
+- configMap
+- downwardAPI
+- emptyDir
+- persistentVolumeClaim
+- secret
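+# Usage sketch: create the SCC, then grant it to the service account
+# running the appliance pods (cfme-anyuid here is an illustrative
+# target; the project name comes from openshift_management_project):
+#
+#   oc create -f cfme-scc-sysadmin.yaml
+#   oc adm policy add-scc-to-user cfme-sysadmin \
+#     system:serviceaccount:openshift-management:cfme-anyuid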
diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-template-ext-db.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-template-ext-db.yaml
new file mode 100644
index 000000000..4a04f3372
--- /dev/null
+++ b/roles/openshift_management/files/templates/cloudforms/cfme-template-ext-db.yaml
@@ -0,0 +1,763 @@
+apiVersion: v1
+kind: Template
+labels:
+ template: cloudforms-ext-db
+metadata:
+ name: cloudforms-ext-db
+ annotations:
+    description: CloudForms appliance with persistent storage using an external DB host
+ tags: instant-app,cloudforms,cfme
+ iconClass: icon-rails
+objects:
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cfme-orchestrator
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cfme-anyuid
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cfme-privileged
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cfme-httpd
+- apiVersion: v1
+ kind: Secret
+ metadata:
+ name: "${NAME}-secrets"
+ stringData:
+ pg-password: "${DATABASE_PASSWORD}"
+ database-url: postgresql://${DATABASE_USER}:${DATABASE_PASSWORD}@${DATABASE_SERVICE_NAME}/${DATABASE_NAME}?encoding=utf8&pool=5&wait_timeout=5
+ v2-key: "${V2_KEY}"
+- apiVersion: v1
+ kind: Secret
+ metadata:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ stringData:
+ rabbit-password: "${ANSIBLE_RABBITMQ_PASSWORD}"
+ secret-key: "${ANSIBLE_SECRET_KEY}"
+ admin-password: "${ANSIBLE_ADMIN_PASSWORD}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Exposes and load balances CloudForms pods
+ service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]'
+ name: "${NAME}"
+ spec:
+ clusterIP: None
+ ports:
+ - name: http
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ selector:
+ name: "${NAME}"
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ spec:
+ host: "${APPLICATION_DOMAIN}"
+ port:
+ targetPort: http
+ tls:
+ termination: edge
+ insecureEdgeTerminationPolicy: Redirect
+ to:
+ kind: Service
+ name: "${HTTPD_SERVICE_NAME}"
+- apiVersion: apps/v1beta1
+ kind: StatefulSet
+ metadata:
+ name: "${NAME}"
+ annotations:
+ description: Defines how to deploy the CloudForms appliance
+ spec:
+ serviceName: "${NAME}"
+ replicas: "${APPLICATION_REPLICA_COUNT}"
+ template:
+ metadata:
+ labels:
+ name: "${NAME}"
+ name: "${NAME}"
+ spec:
+ containers:
+ - name: cloudforms
+ image: "${FRONTEND_APPLICATION_IMG_NAME}:${FRONTEND_APPLICATION_IMG_TAG}"
+ livenessProbe:
+ tcpSocket:
+ port: 80
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ readinessProbe:
+ httpGet:
+ path: "/"
+ port: 80
+ scheme: HTTP
+ initialDelaySeconds: 200
+ timeoutSeconds: 3
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ volumeMounts:
+ - name: "${NAME}-server"
+ mountPath: "/persistent"
+ env:
+ - name: MY_POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: APPLICATION_INIT_DELAY
+ value: "${APPLICATION_INIT_DELAY}"
+ - name: DATABASE_REGION
+ value: "${DATABASE_REGION}"
+ - name: DATABASE_URL
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: database-url
+ - name: V2_KEY
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: v2-key
+ - name: ANSIBLE_ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: admin-password
+ resources:
+ requests:
+ memory: "${APPLICATION_MEM_REQ}"
+ cpu: "${APPLICATION_CPU_REQ}"
+ limits:
+ memory: "${APPLICATION_MEM_LIMIT}"
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - "/opt/rh/cfme-container-scripts/sync-pv-data"
+ serviceAccount: cfme-orchestrator
+ serviceAccountName: cfme-orchestrator
+ terminationGracePeriodSeconds: 90
+ volumeClaimTemplates:
+ - metadata:
+ name: "${NAME}-server"
+ annotations:
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: "${APPLICATION_VOLUME_CAPACITY}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Headless service for CloudForms backend pods
+ name: "${NAME}-backend"
+ spec:
+ clusterIP: None
+ selector:
+ name: "${NAME}-backend"
+- apiVersion: apps/v1beta1
+ kind: StatefulSet
+ metadata:
+ name: "${NAME}-backend"
+ annotations:
+ description: Defines how to deploy the CloudForms appliance
+ spec:
+ serviceName: "${NAME}-backend"
+ replicas: 0
+ template:
+ metadata:
+ labels:
+ name: "${NAME}-backend"
+ name: "${NAME}-backend"
+ spec:
+ containers:
+ - name: cloudforms
+ image: "${BACKEND_APPLICATION_IMG_NAME}:${BACKEND_APPLICATION_IMG_TAG}"
+ livenessProbe:
+ exec:
+ command:
+ - pidof
+ - MIQ Server
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ volumeMounts:
+ - name: "${NAME}-server"
+ mountPath: "/persistent"
+ env:
+ - name: APPLICATION_INIT_DELAY
+ value: "${APPLICATION_INIT_DELAY}"
+ - name: DATABASE_URL
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: database-url
+ - name: MIQ_SERVER_DEFAULT_ROLES
+ value: database_operations,event,reporting,scheduler,smartstate,ems_operations,ems_inventory,automate
+ - name: FRONTEND_SERVICE_NAME
+ value: "${NAME}"
+ - name: V2_KEY
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: v2-key
+ - name: ANSIBLE_ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: admin-password
+ resources:
+ requests:
+ memory: "${APPLICATION_MEM_REQ}"
+ cpu: "${APPLICATION_CPU_REQ}"
+ limits:
+ memory: "${APPLICATION_MEM_LIMIT}"
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - "/opt/rh/cfme-container-scripts/sync-pv-data"
+ serviceAccount: cfme-orchestrator
+ serviceAccountName: cfme-orchestrator
+ terminationGracePeriodSeconds: 90
+ volumeClaimTemplates:
+ - metadata:
+ name: "${NAME}-server"
+ annotations:
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: "${APPLICATION_VOLUME_CAPACITY}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ annotations:
+ description: Exposes the memcached server
+ spec:
+ ports:
+ - name: memcached
+ port: 11211
+ targetPort: 11211
+ selector:
+ name: "${MEMCACHED_SERVICE_NAME}"
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ annotations:
+ description: Defines how to deploy memcached
+ spec:
+ strategy:
+ type: Recreate
+ triggers:
+ - type: ConfigChange
+ replicas: 1
+ selector:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ labels:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ spec:
+ volumes: []
+ containers:
+ - name: memcached
+ image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}"
+ ports:
+ - containerPort: 11211
+ readinessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 5
+ tcpSocket:
+ port: 11211
+ livenessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 30
+ tcpSocket:
+ port: 11211
+ volumeMounts: []
+ env:
+ - name: MEMCACHED_MAX_MEMORY
+ value: "${MEMCACHED_MAX_MEMORY}"
+ - name: MEMCACHED_MAX_CONNECTIONS
+ value: "${MEMCACHED_MAX_CONNECTIONS}"
+ - name: MEMCACHED_SLAB_PAGE_SIZE
+ value: "${MEMCACHED_SLAB_PAGE_SIZE}"
+ resources:
+ requests:
+ memory: "${MEMCACHED_MEM_REQ}"
+ cpu: "${MEMCACHED_CPU_REQ}"
+ limits:
+ memory: "${MEMCACHED_MEM_LIMIT}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ annotations:
+ description: Remote database service
+ spec:
+ ports:
+ - name: postgresql
+ port: 5432
+ targetPort: "${{DATABASE_PORT}}"
+ selector: {}
+- apiVersion: v1
+ kind: Endpoints
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ subsets:
+ - addresses:
+ - ip: "${DATABASE_IP}"
+ ports:
+ - port: "${{DATABASE_PORT}}"
+ name: postgresql
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Exposes and load balances Ansible pods
+ service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"}]'
+ name: "${ANSIBLE_SERVICE_NAME}"
+ spec:
+ ports:
+ - name: http
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ - name: https
+ port: 443
+ protocol: TCP
+ targetPort: 443
+ selector:
+ name: "${ANSIBLE_SERVICE_NAME}"
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${ANSIBLE_SERVICE_NAME}"
+ annotations:
+ description: Defines how to deploy the Ansible appliance
+ spec:
+ strategy:
+ type: Recreate
+ serviceName: "${ANSIBLE_SERVICE_NAME}"
+ replicas: 0
+ template:
+ metadata:
+ labels:
+ name: "${ANSIBLE_SERVICE_NAME}"
+ name: "${ANSIBLE_SERVICE_NAME}"
+ spec:
+ containers:
+ - name: ansible
+ image: "${ANSIBLE_IMG_NAME}:${ANSIBLE_IMG_TAG}"
+ livenessProbe:
+ tcpSocket:
+ port: 443
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ readinessProbe:
+ httpGet:
+ path: "/"
+ port: 443
+ scheme: HTTPS
+ initialDelaySeconds: 200
+ timeoutSeconds: 3
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ - containerPort: 443
+ protocol: TCP
+ securityContext:
+ privileged: true
+ env:
+ - name: ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: admin-password
+ - name: RABBITMQ_USER_NAME
+ value: "${ANSIBLE_RABBITMQ_USER_NAME}"
+ - name: RABBITMQ_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: rabbit-password
+ - name: ANSIBLE_SECRET_KEY
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: secret-key
+ - name: DATABASE_SERVICE_NAME
+ value: "${DATABASE_SERVICE_NAME}"
+ - name: POSTGRESQL_USER
+ value: "${DATABASE_USER}"
+ - name: POSTGRESQL_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: pg-password
+ - name: POSTGRESQL_DATABASE
+ value: "${ANSIBLE_DATABASE_NAME}"
+ resources:
+ requests:
+ memory: "${ANSIBLE_MEM_REQ}"
+ cpu: "${ANSIBLE_CPU_REQ}"
+ limits:
+ memory: "${ANSIBLE_MEM_LIMIT}"
+ serviceAccount: cfme-privileged
+ serviceAccountName: cfme-privileged
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}-configs"
+ data:
+ application.conf: |
+ # Timeout: The number of seconds before receives and sends time out.
+ Timeout 120
+
+ RewriteEngine On
+ Options SymLinksIfOwnerMatch
+
+ <VirtualHost *:80>
+ KeepAlive on
+ ProxyPreserveHost on
+ ProxyPass /ws/ ws://${NAME}/ws/
+ ProxyPassReverse /ws/ ws://${NAME}/ws/
+ ProxyPass / http://${NAME}/
+ ProxyPassReverse / http://${NAME}/
+ </VirtualHost>
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ data:
+ auth-type: internal
+ auth-configuration.conf: |
+ # External Authentication Configuration File
+ #
+ # For details on usage please see https://github.com/ManageIQ/manageiq-pods/blob/master/README.md#configuring-external-authentication
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ annotations:
+ description: Exposes the httpd server
+ service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]'
+ spec:
+ ports:
+ - name: http
+ port: 80
+ targetPort: 80
+ selector:
+      name: "${HTTPD_SERVICE_NAME}"
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ annotations:
+ description: Defines how to deploy httpd
+ spec:
+ strategy:
+ type: Recreate
+ recreateParams:
+ timeoutSeconds: 1200
+ triggers:
+ - type: ConfigChange
+ replicas: 1
+ selector:
+ name: "${HTTPD_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ labels:
+ name: "${HTTPD_SERVICE_NAME}"
+ spec:
+ volumes:
+ - name: httpd-config
+ configMap:
+ name: "${HTTPD_SERVICE_NAME}-configs"
+ - name: httpd-auth-config
+ configMap:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ containers:
+ - name: httpd
+ image: "${HTTPD_IMG_NAME}:${HTTPD_IMG_TAG}"
+ ports:
+ - containerPort: 80
+ livenessProbe:
+ exec:
+ command:
+ - pidof
+ - httpd
+ initialDelaySeconds: 15
+ timeoutSeconds: 3
+ readinessProbe:
+ tcpSocket:
+ port: 80
+ initialDelaySeconds: 10
+ timeoutSeconds: 3
+ volumeMounts:
+ - name: httpd-config
+ mountPath: "${HTTPD_CONFIG_DIR}"
+ - name: httpd-auth-config
+ mountPath: "${HTTPD_AUTH_CONFIG_DIR}"
+ resources:
+ requests:
+ memory: "${HTTPD_MEM_REQ}"
+ cpu: "${HTTPD_CPU_REQ}"
+ limits:
+ memory: "${HTTPD_MEM_LIMIT}"
+ env:
+ - name: HTTPD_AUTH_TYPE
+ valueFrom:
+ configMapKeyRef:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ key: auth-type
+ lifecycle:
+ postStart:
+ exec:
+ command:
+ - "/usr/bin/save-container-environment"
+ serviceAccount: cfme-httpd
+ serviceAccountName: cfme-httpd
+parameters:
+- name: NAME
+ displayName: Name
+ required: true
+ description: The name assigned to all of the frontend objects defined in this template.
+ value: cloudforms
+- name: V2_KEY
+ displayName: CloudForms Encryption Key
+ required: true
+ description: Encryption Key for CloudForms Passwords
+ from: "[a-zA-Z0-9]{43}"
+ generate: expression
+- name: DATABASE_SERVICE_NAME
+ displayName: PostgreSQL Service Name
+ required: true
+ description: The name of the OpenShift Service exposed for the PostgreSQL container.
+ value: postgresql
+- name: DATABASE_USER
+ displayName: PostgreSQL User
+ required: true
+ description: PostgreSQL user that will access the database.
+ value: root
+- name: DATABASE_PASSWORD
+ displayName: PostgreSQL Password
+ required: true
+ description: Password for the PostgreSQL user.
+ from: "[a-zA-Z0-9]{8}"
+ generate: expression
+- name: DATABASE_IP
+ displayName: PostgreSQL Server IP
+ required: true
+ description: PostgreSQL external server IP used to configure service.
+ value: ''
+- name: DATABASE_PORT
+ displayName: PostgreSQL Server Port
+ required: true
+ description: PostgreSQL external server port used to configure service.
+ value: '5432'
+- name: DATABASE_NAME
+ required: true
+ displayName: PostgreSQL Database Name
+ description: Name of the PostgreSQL database accessed.
+ value: vmdb_production
+- name: DATABASE_REGION
+ required: true
+ displayName: Application Database Region
+ description: Database region that will be used for application.
+ value: '0'
+- name: ANSIBLE_DATABASE_NAME
+ displayName: Ansible PostgreSQL database name
+ required: true
+  description: The database to be used by the Ansible container
+ value: awx
+- name: MEMCACHED_SERVICE_NAME
+ required: true
+ displayName: Memcached Service Name
+ description: The name of the OpenShift Service exposed for the Memcached container.
+ value: memcached
+- name: MEMCACHED_MAX_MEMORY
+ displayName: Memcached Max Memory
+ description: Memcached maximum memory for memcached object storage in MB.
+ value: '64'
+- name: MEMCACHED_MAX_CONNECTIONS
+ displayName: Memcached Max Connections
+ description: Memcached maximum number of connections allowed.
+ value: '1024'
+- name: MEMCACHED_SLAB_PAGE_SIZE
+ displayName: Memcached Slab Page Size
+ description: Memcached size of each slab page.
+ value: 1m
+- name: ANSIBLE_SERVICE_NAME
+ displayName: Ansible Service Name
+ description: The name of the OpenShift Service exposed for the Ansible container.
+ value: ansible
+- name: ANSIBLE_ADMIN_PASSWORD
+ displayName: Ansible admin User password
+ required: true
+ description: The password for the Ansible container admin user
+ from: "[a-zA-Z0-9]{32}"
+ generate: expression
+- name: ANSIBLE_SECRET_KEY
+ displayName: Ansible Secret Key
+ required: true
+ description: Encryption key for the Ansible container
+ from: "[a-f0-9]{32}"
+ generate: expression
+- name: ANSIBLE_RABBITMQ_USER_NAME
+ displayName: RabbitMQ Username
+ required: true
+ description: Username for the Ansible RabbitMQ Server
+ value: ansible
+- name: ANSIBLE_RABBITMQ_PASSWORD
+ displayName: RabbitMQ Server Password
+ required: true
+ description: Password for the Ansible RabbitMQ Server
+ from: "[a-zA-Z0-9]{32}"
+ generate: expression
+- name: APPLICATION_CPU_REQ
+ displayName: Application Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Application container will need (expressed in millicores).
+ value: 1000m
+- name: MEMCACHED_CPU_REQ
+ displayName: Memcached Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Memcached container will need (expressed in millicores).
+ value: 200m
+- name: ANSIBLE_CPU_REQ
+ displayName: Ansible Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Ansible container will need (expressed in millicores).
+ value: 1000m
+- name: APPLICATION_MEM_REQ
+ displayName: Application Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Application container will need.
+ value: 6144Mi
+- name: MEMCACHED_MEM_REQ
+ displayName: Memcached Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Memcached container will need.
+ value: 64Mi
+- name: ANSIBLE_MEM_REQ
+ displayName: Ansible Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Ansible container will need.
+ value: 2048Mi
+- name: APPLICATION_MEM_LIMIT
+ displayName: Application Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Application container can consume.
+ value: 16384Mi
+- name: MEMCACHED_MEM_LIMIT
+ displayName: Memcached Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Memcached container can consume.
+ value: 256Mi
+- name: ANSIBLE_MEM_LIMIT
+ displayName: Ansible Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Ansible container can consume.
+ value: 8096Mi
+- name: MEMCACHED_IMG_NAME
+ displayName: Memcached Image Name
+ description: This is the Memcached image name requested to deploy.
+ value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-memcached
+- name: MEMCACHED_IMG_TAG
+ displayName: Memcached Image Tag
+ description: This is the Memcached image tag/version requested to deploy.
+ value: latest
+- name: FRONTEND_APPLICATION_IMG_NAME
+ displayName: Frontend Application Image Name
+ description: This is the Frontend Application image name requested to deploy.
+ value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-app-ui
+- name: BACKEND_APPLICATION_IMG_NAME
+ displayName: Backend Application Image Name
+ description: This is the Backend Application image name requested to deploy.
+ value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-app
+- name: FRONTEND_APPLICATION_IMG_TAG
+ displayName: Front end Application Image Tag
+ description: This is the CloudForms Frontend Application image tag/version requested to deploy.
+ value: latest
+- name: BACKEND_APPLICATION_IMG_TAG
+ displayName: Back end Application Image Tag
+ description: This is the CloudForms Backend Application image tag/version requested to deploy.
+ value: latest
+- name: ANSIBLE_IMG_NAME
+ displayName: Ansible Image Name
+ description: This is the Ansible image name requested to deploy.
+ value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-embedded-ansible
+- name: ANSIBLE_IMG_TAG
+ displayName: Ansible Image Tag
+ description: This is the Ansible image tag/version requested to deploy.
+ value: latest
+- name: APPLICATION_DOMAIN
+ displayName: Application Hostname
+  description: The exposed hostname that will route to the application service; if left blank, a value will be defaulted.
+ value: ''
+- name: APPLICATION_REPLICA_COUNT
+ displayName: Application Replica Count
+ description: This is the number of Application replicas requested to deploy.
+ value: '1'
+- name: APPLICATION_INIT_DELAY
+ displayName: Application Init Delay
+ required: true
+ description: Delay in seconds before we attempt to initialize the application.
+ value: '15'
+- name: APPLICATION_VOLUME_CAPACITY
+ displayName: Application Volume Capacity
+ required: true
+ description: Volume space available for application data.
+ value: 5Gi
+- name: HTTPD_SERVICE_NAME
+ required: true
+ displayName: Apache httpd Service Name
+ description: The name of the OpenShift Service exposed for the httpd container.
+ value: httpd
+- name: HTTPD_IMG_NAME
+ displayName: Apache httpd Image Name
+ description: This is the httpd image name requested to deploy.
+ value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-httpd
+- name: HTTPD_IMG_TAG
+ displayName: Apache httpd Image Tag
+ description: This is the httpd image tag/version requested to deploy.
+ value: latest
+- name: HTTPD_CONFIG_DIR
+ displayName: Apache httpd Configuration Directory
+ description: Directory used to store the Apache configuration files.
+ value: "/etc/httpd/conf.d"
+- name: HTTPD_AUTH_CONFIG_DIR
+ displayName: External Authentication Configuration Directory
+ description: Directory used to store the external authentication configuration files.
+ value: "/etc/httpd/auth-conf.d"
+- name: HTTPD_CPU_REQ
+ displayName: Apache httpd Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the httpd container will need (expressed in millicores).
+ value: 500m
+- name: HTTPD_MEM_REQ
+ displayName: Apache httpd Min RAM Requested
+ required: true
+ description: Minimum amount of memory the httpd container will need.
+ value: 512Mi
+- name: HTTPD_MEM_LIMIT
+ displayName: Apache httpd Max RAM Limit
+ required: true
+ description: Maximum amount of memory the httpd container can consume.
+ value: 8192Mi
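+# Usage sketch for a manual (non-Ansible) deployment against an
+# external database (the IP and password are placeholders):
+#
+#   oc process -f cfme-template-ext-db.yaml \
+#     -p DATABASE_IP=10.1.2.3 -p DATABASE_PASSWORD=changeme \
+#     | oc create -f -
+#
+# When driven by this role, pass the same keys through
+# openshift_management_template_parameters instead.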
diff --git a/roles/openshift_management/files/templates/cloudforms/cfme-template.yaml b/roles/openshift_management/files/templates/cloudforms/cfme-template.yaml
new file mode 100644
index 000000000..d7c9f5af7
--- /dev/null
+++ b/roles/openshift_management/files/templates/cloudforms/cfme-template.yaml
@@ -0,0 +1,940 @@
+apiVersion: v1
+kind: Template
+labels:
+ template: cloudforms
+metadata:
+ name: cloudforms
+ annotations:
+ description: CloudForms appliance with persistent storage
+ tags: instant-app,cloudforms,cfme
+ iconClass: icon-rails
+objects:
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cfme-orchestrator
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cfme-anyuid
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cfme-privileged
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: cfme-httpd
+- apiVersion: v1
+ kind: Secret
+ metadata:
+ name: "${NAME}-secrets"
+ stringData:
+ pg-password: "${DATABASE_PASSWORD}"
+ database-url: postgresql://${DATABASE_USER}:${DATABASE_PASSWORD}@${DATABASE_SERVICE_NAME}/${DATABASE_NAME}?encoding=utf8&pool=5&wait_timeout=5
+ v2-key: "${V2_KEY}"
+- apiVersion: v1
+ kind: Secret
+ metadata:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ stringData:
+ rabbit-password: "${ANSIBLE_RABBITMQ_PASSWORD}"
+ secret-key: "${ANSIBLE_SECRET_KEY}"
+ admin-password: "${ANSIBLE_ADMIN_PASSWORD}"
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}-configs"
+ data:
+ 01_miq_overrides.conf: |
+ #------------------------------------------------------------------------------
+ # CONNECTIONS AND AUTHENTICATION
+ #------------------------------------------------------------------------------
+
+ tcp_keepalives_count = 9
+ tcp_keepalives_idle = 3
+ tcp_keepalives_interval = 75
+
+ #------------------------------------------------------------------------------
+ # RESOURCE USAGE (except WAL)
+ #------------------------------------------------------------------------------
+
+ shared_preload_libraries = 'pglogical,repmgr_funcs'
+ max_worker_processes = 10
+
+ #------------------------------------------------------------------------------
+ # WRITE AHEAD LOG
+ #------------------------------------------------------------------------------
+
+ wal_level = 'logical'
+ wal_log_hints = on
+ wal_buffers = 16MB
+ checkpoint_completion_target = 0.9
+
+ #------------------------------------------------------------------------------
+ # REPLICATION
+ #------------------------------------------------------------------------------
+
+ max_wal_senders = 10
+ wal_sender_timeout = 0
+ max_replication_slots = 10
+ hot_standby = on
+
+ #------------------------------------------------------------------------------
+ # ERROR REPORTING AND LOGGING
+ #------------------------------------------------------------------------------
+
+ log_filename = 'postgresql.log'
+ log_rotation_age = 0
+ log_min_duration_statement = 5000
+ log_connections = on
+ log_disconnections = on
+ log_line_prefix = '%t:%r:%c:%u@%d:[%p]:'
+ log_lock_waits = on
+
+ #------------------------------------------------------------------------------
+ # AUTOVACUUM PARAMETERS
+ #------------------------------------------------------------------------------
+
+ log_autovacuum_min_duration = 0
+ autovacuum_naptime = 5min
+ autovacuum_vacuum_threshold = 500
+ autovacuum_analyze_threshold = 500
+ autovacuum_vacuum_scale_factor = 0.05
+
+ #------------------------------------------------------------------------------
+ # LOCK MANAGEMENT
+ #------------------------------------------------------------------------------
+
+ deadlock_timeout = 5s
+
+ #------------------------------------------------------------------------------
+ # VERSION/PLATFORM COMPATIBILITY
+ #------------------------------------------------------------------------------
+
+ escape_string_warning = off
+ standard_conforming_strings = off
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}-configs"
+ data:
+ application.conf: |
+ # Timeout: The number of seconds before receives and sends time out.
+ Timeout 120
+
+ RewriteEngine On
+ Options SymLinksIfOwnerMatch
+
+ <VirtualHost *:80>
+ KeepAlive on
+ ProxyPreserveHost on
+ ProxyPass /ws/ ws://${NAME}/ws/
+ ProxyPassReverse /ws/ ws://${NAME}/ws/
+ ProxyPass / http://${NAME}/
+ ProxyPassReverse / http://${NAME}/
+ </VirtualHost>
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ data:
+ auth-type: internal
+ auth-configuration.conf: |
+ # External Authentication Configuration File
+ #
+ # For details on usage please see https://github.com/ManageIQ/manageiq-pods/blob/master/README.md#configuring-external-authentication
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Exposes and load balances CloudForms pods
+ service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]'
+ name: "${NAME}"
+ spec:
+ clusterIP: None
+ ports:
+ - name: http
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ selector:
+ name: "${NAME}"
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ spec:
+ host: "${APPLICATION_DOMAIN}"
+ port:
+ targetPort: http
+ tls:
+ termination: edge
+ insecureEdgeTerminationPolicy: Redirect
+ to:
+ kind: Service
+ name: "${HTTPD_SERVICE_NAME}"
+- apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: "${NAME}-${DATABASE_SERVICE_NAME}"
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: "${DATABASE_VOLUME_CAPACITY}"
+- apiVersion: apps/v1beta1
+ kind: StatefulSet
+ metadata:
+ name: "${NAME}"
+ annotations:
+ description: Defines how to deploy the CloudForms appliance
+ spec:
+ serviceName: "${NAME}"
+ replicas: "${APPLICATION_REPLICA_COUNT}"
+ template:
+ metadata:
+ labels:
+ name: "${NAME}"
+ name: "${NAME}"
+ spec:
+ containers:
+ - name: cloudforms
+ image: "${FRONTEND_APPLICATION_IMG_NAME}:${FRONTEND_APPLICATION_IMG_TAG}"
+ livenessProbe:
+ tcpSocket:
+ port: 80
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ readinessProbe:
+ httpGet:
+ path: "/"
+ port: 80
+ scheme: HTTP
+ initialDelaySeconds: 200
+ timeoutSeconds: 3
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ volumeMounts:
+ - name: "${NAME}-server"
+ mountPath: "/persistent"
+ env:
+ - name: MY_POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: APPLICATION_INIT_DELAY
+ value: "${APPLICATION_INIT_DELAY}"
+ - name: DATABASE_REGION
+ value: "${DATABASE_REGION}"
+ - name: DATABASE_URL
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: database-url
+ - name: V2_KEY
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: v2-key
+ - name: ANSIBLE_ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: admin-password
+ resources:
+ requests:
+ memory: "${APPLICATION_MEM_REQ}"
+ cpu: "${APPLICATION_CPU_REQ}"
+ limits:
+ memory: "${APPLICATION_MEM_LIMIT}"
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - "/opt/rh/cfme-container-scripts/sync-pv-data"
+ serviceAccount: cfme-orchestrator
+ serviceAccountName: cfme-orchestrator
+ terminationGracePeriodSeconds: 90
+ volumeClaimTemplates:
+ - metadata:
+ name: "${NAME}-server"
+ annotations:
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: "${APPLICATION_VOLUME_CAPACITY}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Headless service for CloudForms backend pods
+ name: "${NAME}-backend"
+ spec:
+ clusterIP: None
+ selector:
+ name: "${NAME}-backend"
+- apiVersion: apps/v1beta1
+ kind: StatefulSet
+ metadata:
+ name: "${NAME}-backend"
+ annotations:
+ description: Defines how to deploy the CloudForms appliance
+ spec:
+ serviceName: "${NAME}-backend"
+ replicas: 0
+ template:
+ metadata:
+ labels:
+ name: "${NAME}-backend"
+ name: "${NAME}-backend"
+ spec:
+ containers:
+ - name: cloudforms
+ image: "${BACKEND_APPLICATION_IMG_NAME}:${BACKEND_APPLICATION_IMG_TAG}"
+ livenessProbe:
+ exec:
+ command:
+ - pidof
+ - MIQ Server
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ volumeMounts:
+ - name: "${NAME}-server"
+ mountPath: "/persistent"
+ env:
+ - name: APPLICATION_INIT_DELAY
+ value: "${APPLICATION_INIT_DELAY}"
+ - name: DATABASE_URL
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: database-url
+ - name: MIQ_SERVER_DEFAULT_ROLES
+ value: database_operations,event,reporting,scheduler,smartstate,ems_operations,ems_inventory,automate
+ - name: FRONTEND_SERVICE_NAME
+ value: "${NAME}"
+ - name: V2_KEY
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: v2-key
+ - name: ANSIBLE_ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: admin-password
+ resources:
+ requests:
+ memory: "${APPLICATION_MEM_REQ}"
+ cpu: "${APPLICATION_CPU_REQ}"
+ limits:
+ memory: "${APPLICATION_MEM_LIMIT}"
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - "/opt/rh/cfme-container-scripts/sync-pv-data"
+ serviceAccount: cfme-orchestrator
+ serviceAccountName: cfme-orchestrator
+ terminationGracePeriodSeconds: 90
+ volumeClaimTemplates:
+ - metadata:
+ name: "${NAME}-server"
+ annotations:
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: "${APPLICATION_VOLUME_CAPACITY}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ annotations:
+ description: Exposes the memcached server
+ spec:
+ ports:
+ - name: memcached
+ port: 11211
+ targetPort: 11211
+ selector:
+ name: "${MEMCACHED_SERVICE_NAME}"
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ annotations:
+ description: Defines how to deploy memcached
+ spec:
+ strategy:
+ type: Recreate
+ triggers:
+ - type: ConfigChange
+ replicas: 1
+ selector:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ labels:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ spec:
+ volumes: []
+ containers:
+ - name: memcached
+ image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}"
+ ports:
+ - containerPort: 11211
+ readinessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 5
+ tcpSocket:
+ port: 11211
+ livenessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 30
+ tcpSocket:
+ port: 11211
+ volumeMounts: []
+ env:
+ - name: MEMCACHED_MAX_MEMORY
+ value: "${MEMCACHED_MAX_MEMORY}"
+ - name: MEMCACHED_MAX_CONNECTIONS
+ value: "${MEMCACHED_MAX_CONNECTIONS}"
+ - name: MEMCACHED_SLAB_PAGE_SIZE
+ value: "${MEMCACHED_SLAB_PAGE_SIZE}"
+ resources:
+ requests:
+ memory: "${MEMCACHED_MEM_REQ}"
+ cpu: "${MEMCACHED_CPU_REQ}"
+ limits:
+ memory: "${MEMCACHED_MEM_LIMIT}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ annotations:
+ description: Exposes the database server
+ spec:
+ ports:
+ - name: postgresql
+ port: 5432
+ targetPort: 5432
+ selector:
+ name: "${DATABASE_SERVICE_NAME}"
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ annotations:
+ description: Defines how to deploy the database
+ spec:
+ strategy:
+ type: Recreate
+ triggers:
+ - type: ConfigChange
+ replicas: 1
+ selector:
+ name: "${DATABASE_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ labels:
+ name: "${DATABASE_SERVICE_NAME}"
+ spec:
+ volumes:
+ - name: cfme-pgdb-volume
+ persistentVolumeClaim:
+ claimName: "${NAME}-${DATABASE_SERVICE_NAME}"
+ - name: cfme-pg-configs
+ configMap:
+ name: "${DATABASE_SERVICE_NAME}-configs"
+ containers:
+ - name: postgresql
+ image: "${POSTGRESQL_IMG_NAME}:${POSTGRESQL_IMG_TAG}"
+ ports:
+ - containerPort: 5432
+ readinessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 15
+ exec:
+ command:
+ - "/bin/sh"
+ - "-i"
+ - "-c"
+ - psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1'
+ livenessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 60
+ tcpSocket:
+ port: 5432
+ volumeMounts:
+ - name: cfme-pgdb-volume
+ mountPath: "/var/lib/pgsql/data"
+ - name: cfme-pg-configs
+ mountPath: "${POSTGRESQL_CONFIG_DIR}"
+ env:
+ - name: POSTGRESQL_USER
+ value: "${DATABASE_USER}"
+ - name: POSTGRESQL_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: pg-password
+ - name: POSTGRESQL_DATABASE
+ value: "${DATABASE_NAME}"
+ - name: POSTGRESQL_MAX_CONNECTIONS
+ value: "${POSTGRESQL_MAX_CONNECTIONS}"
+ - name: POSTGRESQL_SHARED_BUFFERS
+ value: "${POSTGRESQL_SHARED_BUFFERS}"
+ - name: POSTGRESQL_CONFIG_DIR
+ value: "${POSTGRESQL_CONFIG_DIR}"
+ resources:
+ requests:
+ memory: "${POSTGRESQL_MEM_REQ}"
+ cpu: "${POSTGRESQL_CPU_REQ}"
+ limits:
+ memory: "${POSTGRESQL_MEM_LIMIT}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Exposes and load balances Ansible pods
+ service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"}]'
+ name: "${ANSIBLE_SERVICE_NAME}"
+ spec:
+ ports:
+ - name: http
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ - name: https
+ port: 443
+ protocol: TCP
+ targetPort: 443
+ selector:
+ name: "${ANSIBLE_SERVICE_NAME}"
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${ANSIBLE_SERVICE_NAME}"
+ annotations:
+ description: Defines how to deploy the Ansible appliance
+ spec:
+ strategy:
+ type: Recreate
+ serviceName: "${ANSIBLE_SERVICE_NAME}"
+ replicas: 0
+ template:
+ metadata:
+ labels:
+ name: "${ANSIBLE_SERVICE_NAME}"
+ name: "${ANSIBLE_SERVICE_NAME}"
+ spec:
+ containers:
+ - name: ansible
+ image: "${ANSIBLE_IMG_NAME}:${ANSIBLE_IMG_TAG}"
+ livenessProbe:
+ tcpSocket:
+ port: 443
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ readinessProbe:
+ httpGet:
+ path: "/"
+ port: 443
+ scheme: HTTPS
+ initialDelaySeconds: 200
+ timeoutSeconds: 3
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ - containerPort: 443
+ protocol: TCP
+ securityContext:
+ privileged: true
+ env:
+ - name: ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: admin-password
+ - name: RABBITMQ_USER_NAME
+ value: "${ANSIBLE_RABBITMQ_USER_NAME}"
+ - name: RABBITMQ_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: rabbit-password
+ - name: ANSIBLE_SECRET_KEY
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: secret-key
+ - name: DATABASE_SERVICE_NAME
+ value: "${DATABASE_SERVICE_NAME}"
+ - name: POSTGRESQL_USER
+ value: "${DATABASE_USER}"
+ - name: POSTGRESQL_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: pg-password
+ - name: POSTGRESQL_DATABASE
+ value: "${ANSIBLE_DATABASE_NAME}"
+ resources:
+ requests:
+ memory: "${ANSIBLE_MEM_REQ}"
+ cpu: "${ANSIBLE_CPU_REQ}"
+ limits:
+ memory: "${ANSIBLE_MEM_LIMIT}"
+ serviceAccount: cfme-privileged
+ serviceAccountName: cfme-privileged
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ annotations:
+ description: Exposes the httpd server
+ service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]'
+ spec:
+ ports:
+ - name: http
+ port: 80
+ targetPort: 80
+ selector:
+      name: "${HTTPD_SERVICE_NAME}"
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ annotations:
+ description: Defines how to deploy httpd
+ spec:
+ strategy:
+ type: Recreate
+ recreateParams:
+ timeoutSeconds: 1200
+ triggers:
+ - type: ConfigChange
+ replicas: 1
+ selector:
+ name: "${HTTPD_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ labels:
+ name: "${HTTPD_SERVICE_NAME}"
+ spec:
+ volumes:
+ - name: httpd-config
+ configMap:
+ name: "${HTTPD_SERVICE_NAME}-configs"
+ - name: httpd-auth-config
+ configMap:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ containers:
+ - name: httpd
+ image: "${HTTPD_IMG_NAME}:${HTTPD_IMG_TAG}"
+ ports:
+ - containerPort: 80
+ livenessProbe:
+ exec:
+ command:
+ - pidof
+ - httpd
+ initialDelaySeconds: 15
+ timeoutSeconds: 3
+ readinessProbe:
+ tcpSocket:
+ port: 80
+ initialDelaySeconds: 10
+ timeoutSeconds: 3
+ volumeMounts:
+ - name: httpd-config
+ mountPath: "${HTTPD_CONFIG_DIR}"
+ - name: httpd-auth-config
+ mountPath: "${HTTPD_AUTH_CONFIG_DIR}"
+ resources:
+ requests:
+ memory: "${HTTPD_MEM_REQ}"
+ cpu: "${HTTPD_CPU_REQ}"
+ limits:
+ memory: "${HTTPD_MEM_LIMIT}"
+ env:
+ - name: HTTPD_AUTH_TYPE
+ valueFrom:
+ configMapKeyRef:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ key: auth-type
+ lifecycle:
+ postStart:
+ exec:
+ command:
+ - "/usr/bin/save-container-environment"
+ serviceAccount: cfme-httpd
+ serviceAccountName: cfme-httpd
+parameters:
+- name: NAME
+ displayName: Name
+ required: true
+ description: The name assigned to all of the frontend objects defined in this template.
+ value: cloudforms
+- name: V2_KEY
+ displayName: CloudForms Encryption Key
+ required: true
+ description: Encryption Key for CloudForms Passwords
+ from: "[a-zA-Z0-9]{43}"
+ generate: expression
+- name: DATABASE_SERVICE_NAME
+ displayName: PostgreSQL Service Name
+ required: true
+ description: The name of the OpenShift Service exposed for the PostgreSQL container.
+ value: postgresql
+- name: DATABASE_USER
+ displayName: PostgreSQL User
+ required: true
+ description: PostgreSQL user that will access the database.
+ value: root
+- name: DATABASE_PASSWORD
+ displayName: PostgreSQL Password
+ required: true
+ description: Password for the PostgreSQL user.
+ from: "[a-zA-Z0-9]{8}"
+ generate: expression
+- name: DATABASE_NAME
+ required: true
+ displayName: PostgreSQL Database Name
+ description: Name of the PostgreSQL database accessed.
+ value: vmdb_production
+- name: DATABASE_REGION
+ required: true
+ displayName: Application Database Region
+ description: Database region that will be used for application.
+ value: '0'
+- name: ANSIBLE_DATABASE_NAME
+ displayName: Ansible PostgreSQL database name
+ required: true
+  description: The database to be used by the Ansible container
+ value: awx
+- name: MEMCACHED_SERVICE_NAME
+ required: true
+ displayName: Memcached Service Name
+ description: The name of the OpenShift Service exposed for the Memcached container.
+ value: memcached
+- name: MEMCACHED_MAX_MEMORY
+ displayName: Memcached Max Memory
+ description: Memcached maximum memory for memcached object storage in MB.
+ value: '64'
+- name: MEMCACHED_MAX_CONNECTIONS
+ displayName: Memcached Max Connections
+ description: Memcached maximum number of connections allowed.
+ value: '1024'
+- name: MEMCACHED_SLAB_PAGE_SIZE
+ displayName: Memcached Slab Page Size
+ description: Memcached size of each slab page.
+ value: 1m
+- name: POSTGRESQL_CONFIG_DIR
+ displayName: PostgreSQL Configuration Overrides
+ description: Directory used to store PostgreSQL configuration overrides.
+ value: "/var/lib/pgsql/conf.d"
+- name: POSTGRESQL_MAX_CONNECTIONS
+ displayName: PostgreSQL Max Connections
+ description: PostgreSQL maximum number of database connections allowed.
+ value: '1000'
+- name: POSTGRESQL_SHARED_BUFFERS
+ displayName: PostgreSQL Shared Buffer Amount
+ description: Amount of memory dedicated for PostgreSQL shared memory buffers.
+ value: 1GB
+- name: ANSIBLE_SERVICE_NAME
+ displayName: Ansible Service Name
+ description: The name of the OpenShift Service exposed for the Ansible container.
+ value: ansible
+- name: ANSIBLE_ADMIN_PASSWORD
+ displayName: Ansible admin User password
+ required: true
+ description: The password for the Ansible container admin user
+ from: "[a-zA-Z0-9]{32}"
+ generate: expression
+- name: ANSIBLE_SECRET_KEY
+ displayName: Ansible Secret Key
+ required: true
+ description: Encryption key for the Ansible container
+ from: "[a-f0-9]{32}"
+ generate: expression
+- name: ANSIBLE_RABBITMQ_USER_NAME
+ displayName: RabbitMQ Username
+ required: true
+ description: Username for the Ansible RabbitMQ Server
+ value: ansible
+- name: ANSIBLE_RABBITMQ_PASSWORD
+ displayName: RabbitMQ Server Password
+ required: true
+ description: Password for the Ansible RabbitMQ Server
+ from: "[a-zA-Z0-9]{32}"
+ generate: expression
+- name: APPLICATION_CPU_REQ
+ displayName: Application Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Application container will need (expressed in millicores).
+ value: 1000m
+- name: POSTGRESQL_CPU_REQ
+ displayName: PostgreSQL Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the PostgreSQL container will need (expressed in millicores).
+ value: 500m
+- name: MEMCACHED_CPU_REQ
+ displayName: Memcached Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Memcached container will need (expressed in millicores).
+ value: 200m
+- name: ANSIBLE_CPU_REQ
+ displayName: Ansible Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Ansible container will need (expressed in millicores).
+ value: 1000m
+- name: APPLICATION_MEM_REQ
+ displayName: Application Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Application container will need.
+ value: 6144Mi
+- name: POSTGRESQL_MEM_REQ
+ displayName: PostgreSQL Min RAM Requested
+ required: true
+ description: Minimum amount of memory the PostgreSQL container will need.
+ value: 4Gi
+- name: MEMCACHED_MEM_REQ
+ displayName: Memcached Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Memcached container will need.
+ value: 64Mi
+- name: ANSIBLE_MEM_REQ
+ displayName: Ansible Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Ansible container will need.
+ value: 2048Mi
+- name: APPLICATION_MEM_LIMIT
+ displayName: Application Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Application container can consume.
+ value: 16384Mi
+- name: POSTGRESQL_MEM_LIMIT
+ displayName: PostgreSQL Max RAM Limit
+ required: true
+ description: Maximum amount of memory the PostgreSQL container can consume.
+ value: 8Gi
+- name: MEMCACHED_MEM_LIMIT
+ displayName: Memcached Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Memcached container can consume.
+ value: 256Mi
+- name: ANSIBLE_MEM_LIMIT
+ displayName: Ansible Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Ansible container can consume.
+ value: 8096Mi
+- name: POSTGRESQL_IMG_NAME
+ displayName: PostgreSQL Image Name
+ description: This is the PostgreSQL image name requested to deploy.
+ value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-postgresql
+- name: POSTGRESQL_IMG_TAG
+ displayName: PostgreSQL Image Tag
+ description: This is the PostgreSQL image tag/version requested to deploy.
+ value: latest
+- name: MEMCACHED_IMG_NAME
+ displayName: Memcached Image Name
+ description: This is the Memcached image name requested to deploy.
+ value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-memcached
+- name: MEMCACHED_IMG_TAG
+ displayName: Memcached Image Tag
+ description: This is the Memcached image tag/version requested to deploy.
+ value: latest
+- name: FRONTEND_APPLICATION_IMG_NAME
+ displayName: Frontend Application Image Name
+ description: This is the Frontend Application image name requested to deploy.
+ value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-app-ui
+- name: BACKEND_APPLICATION_IMG_NAME
+ displayName: Backend Application Image Name
+ description: This is the Backend Application image name requested to deploy.
+ value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-app
+- name: FRONTEND_APPLICATION_IMG_TAG
+ displayName: Front end Application Image Tag
+ description: This is the CloudForms Frontend Application image tag/version requested to deploy.
+ value: latest
+- name: BACKEND_APPLICATION_IMG_TAG
+ displayName: Back end Application Image Tag
+ description: This is the CloudForms Backend Application image tag/version requested to deploy.
+ value: latest
+- name: ANSIBLE_IMG_NAME
+ displayName: Ansible Image Name
+ description: This is the Ansible image name requested to deploy.
+ value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-embedded-ansible
+- name: ANSIBLE_IMG_TAG
+ displayName: Ansible Image Tag
+ description: This is the Ansible image tag/version requested to deploy.
+ value: latest
+- name: APPLICATION_DOMAIN
+ displayName: Application Hostname
+  description: The exposed hostname that will route to the application service; if left blank, a value will be defaulted.
+ value: ''
+- name: APPLICATION_REPLICA_COUNT
+ displayName: Application Replica Count
+ description: This is the number of Application replicas requested to deploy.
+ value: '1'
+- name: APPLICATION_INIT_DELAY
+ displayName: Application Init Delay
+ required: true
+ description: Delay in seconds before we attempt to initialize the application.
+ value: '15'
+- name: APPLICATION_VOLUME_CAPACITY
+ displayName: Application Volume Capacity
+ required: true
+ description: Volume space available for application data.
+ value: 5Gi
+- name: DATABASE_VOLUME_CAPACITY
+ displayName: Database Volume Capacity
+ required: true
+ description: Volume space available for database.
+ value: 15Gi
+- name: HTTPD_SERVICE_NAME
+ required: true
+ displayName: Apache httpd Service Name
+ description: The name of the OpenShift Service exposed for the httpd container.
+ value: httpd
+- name: HTTPD_IMG_NAME
+ displayName: Apache httpd Image Name
+ description: This is the httpd image name requested to deploy.
+ value: brew-pulp-docker01.web.prod.ext.phx2.redhat.com:8888/cloudforms46/cfme-openshift-httpd
+- name: HTTPD_IMG_TAG
+ displayName: Apache httpd Image Tag
+ description: This is the httpd image tag/version requested to deploy.
+ value: latest
+- name: HTTPD_CONFIG_DIR
+  displayName: Apache httpd Configuration Directory
+ description: Directory used to store the Apache configuration files.
+ value: "/etc/httpd/conf.d"
+- name: HTTPD_AUTH_CONFIG_DIR
+ displayName: External Authentication Configuration Directory
+ description: Directory used to store the external authentication configuration files.
+ value: "/etc/httpd/auth-conf.d"
+- name: HTTPD_CPU_REQ
+ displayName: Apache httpd Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the httpd container will need (expressed in millicores).
+ value: 500m
+- name: HTTPD_MEM_REQ
+ displayName: Apache httpd Min RAM Requested
+ required: true
+ description: Minimum amount of memory the httpd container will need.
+ value: 512Mi
+- name: HTTPD_MEM_LIMIT
+ displayName: Apache httpd Max RAM Limit
+ required: true
+ description: Maximum amount of memory the httpd container can consume.
+ value: 8192Mi
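
All of the values above are standard OpenShift template parameters, so they can be overridden when the template is processed rather than by editing the file. A minimal sketch, assuming the template has been saved locally and the oc client is logged in to the target cluster (the file path, namespace, and override values are illustrative only):

    # Hypothetical example: render the template with overridden httpd limits
    - name: Process the CloudForms template with custom parameters
      shell: >
        oc process -f /tmp/cfme-template.yaml
        -p HTTPD_MEM_REQ=256Mi -p HTTPD_MEM_LIMIT=4096Mi
        | oc create -n openshift-management -f -
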
diff --git a/roles/openshift_management/files/templates/manageiq/miq-backup-job.yaml b/roles/openshift_management/files/templates/manageiq/miq-backup-job.yaml
new file mode 100644
index 000000000..044cb73a5
--- /dev/null
+++ b/roles/openshift_management/files/templates/manageiq/miq-backup-job.yaml
@@ -0,0 +1,28 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: manageiq-backup
+spec:
+  template:
+    metadata:
+      name: manageiq-backup
+    spec:
+      containers:
+      - name: postgresql
+        image: docker.io/manageiq/postgresql:latest
+        command:
+        - "/opt/manageiq/container-scripts/backup_db"
+        env:
+        - name: DATABASE_URL
+          valueFrom:
+            secretKeyRef:
+              name: manageiq-secrets
+              key: database-url
+        volumeMounts:
+        - name: miq-backup-vol
+          mountPath: "/backups"
+      volumes:
+      - name: miq-backup-vol
+        persistentVolumeClaim:
+          claimName: manageiq-backup
+      restartPolicy: Never
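
The Job above runs the image's backup_db script against the database identified by the database-url key in the manageiq-secrets Secret and writes the dump to the volume mounted at /backups. A minimal usage sketch, assuming the backup PVC defined in the next file already exists (the namespace is illustrative):

    # Hypothetical example: launch a one-off database backup
    - name: Create the ManageIQ backup Job
      command: oc create -f miq-backup-job.yaml -n openshift-management
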
diff --git a/roles/openshift_management/files/templates/manageiq/miq-backup-pvc.yaml b/roles/openshift_management/files/templates/manageiq/miq-backup-pvc.yaml
new file mode 100644
index 000000000..25696ef23
--- /dev/null
+++ b/roles/openshift_management/files/templates/manageiq/miq-backup-pvc.yaml
@@ -0,0 +1,10 @@
+apiVersion: v1
+kind: PersistentVolumeClaim
+metadata:
+  name: manageiq-backup
+spec:
+  accessModes:
+  - ReadWriteOnce
+  resources:
+    requests:
+      storage: 15Gi
diff --git a/roles/openshift_management/files/templates/manageiq/miq-pv-backup-example.yaml b/roles/openshift_management/files/templates/manageiq/miq-pv-backup-example.yaml
new file mode 100644
index 000000000..a5cf54d4e
--- /dev/null
+++ b/roles/openshift_management/files/templates/manageiq/miq-pv-backup-example.yaml
@@ -0,0 +1,13 @@
+apiVersion: v1
+kind: PersistentVolume
+metadata:
+  name: miq-pv03
+spec:
+  capacity:
+    storage: 15Gi
+  accessModes:
+  - ReadWriteOnce
+  nfs:
+    path: "/exports/miq-pv03"
+    server: "<your-nfs-host-here>"
+  persistentVolumeReclaimPolicy: Retain
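
Unlike the parameterized templates that follow, this is a raw PersistentVolume, so the <your-nfs-host-here> placeholder has to be substituted before the object is created. A minimal sketch, assuming the file has been copied locally (host name and path are illustrative):

    # Hypothetical example: fill in the NFS host, then create the PV
    - name: Set the NFS server in the backup PV example
      replace:
        path: /tmp/miq-pv-backup-example.yaml
        regexp: '<your-nfs-host-here>'
        replace: 'nfs.example.com'
    - name: Create the backup PV
      command: oc create -f /tmp/miq-pv-backup-example.yaml
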
diff --git a/roles/openshift_management/files/templates/manageiq/miq-pv-db-example.yaml b/roles/openshift_management/files/templates/manageiq/miq-pv-db-example.yaml
new file mode 100644
index 000000000..a803bebe2
--- /dev/null
+++ b/roles/openshift_management/files/templates/manageiq/miq-pv-db-example.yaml
@@ -0,0 +1,38 @@
+apiVersion: v1
+kind: Template
+labels:
+  template: manageiq-db-pv
+metadata:
+  name: manageiq-db-pv
+  annotations:
+    description: PV Template for MIQ PostgreSQL DB
+    tags: PVS, MIQ
+objects:
+- apiVersion: v1
+  kind: PersistentVolume
+  metadata:
+    name: miq-db
+  spec:
+    capacity:
+      storage: "${PV_SIZE}"
+    accessModes:
+    - ReadWriteOnce
+    nfs:
+      path: "${BASE_PATH}/miq-db"
+      server: "${NFS_HOST}"
+    persistentVolumeReclaimPolicy: Retain
+parameters:
+- name: PV_SIZE
+  displayName: PV Size for DB
+  required: true
+  description: The size of the MIQ DB PV given in Gi
+  value: 15Gi
+- name: BASE_PATH
+  displayName: Exports Directory Base Path
+  required: true
+  description: The parent directory of your NFS exports
+  value: "/exports"
+- name: NFS_HOST
+  displayName: NFS Server Hostname
+  required: true
+  description: The hostname or IP address of the NFS server
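
Because this PV example is wrapped in a Template, the NFS details can be supplied as parameters at processing time; note that NFS_HOST has no default, so it must always be passed. A minimal sketch (host and size values are illustrative), which applies equally to the app/server PV template that follows:

    # Hypothetical example: render and create the database PV
    - name: Process the MIQ DB PV template
      shell: >
        oc process -f /tmp/miq-pv-db-example.yaml
        -p NFS_HOST=nfs.example.com -p PV_SIZE=15Gi
        | oc create -f -
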
diff --git a/roles/openshift_management/files/templates/manageiq/miq-pv-server-example.yaml b/roles/openshift_management/files/templates/manageiq/miq-pv-server-example.yaml
new file mode 100644
index 000000000..1288544d1
--- /dev/null
+++ b/roles/openshift_management/files/templates/manageiq/miq-pv-server-example.yaml
@@ -0,0 +1,38 @@
+apiVersion: v1
+kind: Template
+labels:
+  template: manageiq-app-pv
+metadata:
+  name: manageiq-app-pv
+  annotations:
+    description: PV Template for MIQ Server
+    tags: PVS, MIQ
+objects:
+- apiVersion: v1
+  kind: PersistentVolume
+  metadata:
+    name: miq-app
+  spec:
+    capacity:
+      storage: "${PV_SIZE}"
+    accessModes:
+    - ReadWriteOnce
+    nfs:
+      path: "${BASE_PATH}/miq-app"
+      server: "${NFS_HOST}"
+    persistentVolumeReclaimPolicy: Retain
+parameters:
+- name: PV_SIZE
+  displayName: PV Size for App
+  required: true
+  description: The size of the MIQ APP PV given in Gi
+  value: 5Gi
+- name: BASE_PATH
+  displayName: Exports Directory Base Path
+  required: true
+  description: The parent directory of your NFS exports
+  value: "/exports"
+- name: NFS_HOST
+  displayName: NFS Server Hostname
+  required: true
+  description: The hostname or IP address of the NFS server
diff --git a/roles/openshift_management/files/templates/manageiq/miq-restore-job.yaml b/roles/openshift_management/files/templates/manageiq/miq-restore-job.yaml
new file mode 100644
index 000000000..eea284dd4
--- /dev/null
+++ b/roles/openshift_management/files/templates/manageiq/miq-restore-job.yaml
@@ -0,0 +1,35 @@
+apiVersion: batch/v1
+kind: Job
+metadata:
+  name: manageiq-restore
+spec:
+  template:
+    metadata:
+      name: manageiq-restore
+    spec:
+      containers:
+      - name: postgresql
+        image: docker.io/manageiq/postgresql:latest
+        command:
+        - "/opt/manageiq/container-scripts/restore_db"
+        env:
+        - name: DATABASE_URL
+          valueFrom:
+            secretKeyRef:
+              name: manageiq-secrets
+              key: database-url
+        - name: BACKUP_VERSION
+          value: latest
+        volumeMounts:
+        - name: miq-backup-vol
+          mountPath: "/backups"
+        - name: miq-prod-vol
+          mountPath: "/restore"
+      volumes:
+      - name: miq-backup-vol
+        persistentVolumeClaim:
+          claimName: manageiq-backup
+      - name: miq-prod-vol
+        persistentVolumeClaim:
+          claimName: manageiq-postgresql
+      restartPolicy: Never
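
The restore Job mounts both the backup volume and the production database claim, and the BACKUP_VERSION environment variable selects which dump under /backups to restore (latest by default); to restore an older dump, edit that value before creating the Job. The database pod is typically scaled down first so the restore does not run against a live database. A minimal sketch (service and namespace names assume the template defaults):

    # Hypothetical example: stop the database, then run the restore
    - name: Scale down PostgreSQL before restoring
      command: oc scale dc postgresql --replicas=0 -n openshift-management
    - name: Create the ManageIQ restore Job
      command: oc create -f miq-restore-job.yaml -n openshift-management
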
diff --git a/roles/openshift_management/files/templates/manageiq/miq-template-ext-db.yaml b/roles/openshift_management/files/templates/manageiq/miq-template-ext-db.yaml
new file mode 100644
index 000000000..82cd5d49e
--- /dev/null
+++ b/roles/openshift_management/files/templates/manageiq/miq-template-ext-db.yaml
@@ -0,0 +1,771 @@
+apiVersion: v1
+kind: Template
+labels:
+ template: manageiq-ext-db
+metadata:
+ name: manageiq-ext-db
+ annotations:
+ description: ManageIQ appliance with persistent storage using an external DB host
+ tags: instant-app,manageiq,miq
+ iconClass: icon-rails
+objects:
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: miq-orchestrator
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: miq-anyuid
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: miq-privileged
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: miq-httpd
+- apiVersion: v1
+ kind: Secret
+ metadata:
+ name: "${NAME}-secrets"
+ stringData:
+ pg-password: "${DATABASE_PASSWORD}"
+ database-url: postgresql://${DATABASE_USER}:${DATABASE_PASSWORD}@${DATABASE_SERVICE_NAME}/${DATABASE_NAME}?encoding=utf8&pool=5&wait_timeout=5
+ v2-key: "${V2_KEY}"
+- apiVersion: v1
+ kind: Secret
+ metadata:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ stringData:
+ rabbit-password: "${ANSIBLE_RABBITMQ_PASSWORD}"
+ secret-key: "${ANSIBLE_SECRET_KEY}"
+ admin-password: "${ANSIBLE_ADMIN_PASSWORD}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Exposes and load balances ManageIQ pods
+ service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]'
+ name: "${NAME}"
+ spec:
+ clusterIP: None
+ ports:
+ - name: http
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ selector:
+ name: "${NAME}"
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ spec:
+ host: "${APPLICATION_DOMAIN}"
+ port:
+ targetPort: http
+ tls:
+ termination: edge
+ insecureEdgeTerminationPolicy: Redirect
+ to:
+ kind: Service
+ name: "${HTTPD_SERVICE_NAME}"
+- apiVersion: apps/v1beta1
+ kind: StatefulSet
+ metadata:
+ name: "${NAME}"
+ annotations:
+ description: Defines how to deploy the ManageIQ appliance
+ spec:
+ serviceName: "${NAME}"
+ replicas: "${APPLICATION_REPLICA_COUNT}"
+ template:
+ metadata:
+ labels:
+ name: "${NAME}"
+ name: "${NAME}"
+ spec:
+ containers:
+ - name: manageiq
+ image: "${APPLICATION_IMG_NAME}:${FRONTEND_APPLICATION_IMG_TAG}"
+ livenessProbe:
+ tcpSocket:
+ port: 80
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ readinessProbe:
+ httpGet:
+ path: "/"
+ port: 80
+ scheme: HTTP
+ initialDelaySeconds: 200
+ timeoutSeconds: 3
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ volumeMounts:
+ - name: "${NAME}-server"
+ mountPath: "/persistent"
+ env:
+ - name: MY_POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: APPLICATION_INIT_DELAY
+ value: "${APPLICATION_INIT_DELAY}"
+ - name: DATABASE_SERVICE_NAME
+ value: "${DATABASE_SERVICE_NAME}"
+ - name: DATABASE_REGION
+ value: "${DATABASE_REGION}"
+ - name: DATABASE_URL
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: database-url
+ - name: MEMCACHED_SERVER
+ value: "${MEMCACHED_SERVICE_NAME}:11211"
+ - name: MEMCACHED_SERVICE_NAME
+ value: "${MEMCACHED_SERVICE_NAME}"
+ - name: V2_KEY
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: v2-key
+ - name: ANSIBLE_SERVICE_NAME
+ value: "${ANSIBLE_SERVICE_NAME}"
+ - name: ANSIBLE_ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: admin-password
+ resources:
+ requests:
+ memory: "${APPLICATION_MEM_REQ}"
+ cpu: "${APPLICATION_CPU_REQ}"
+ limits:
+ memory: "${APPLICATION_MEM_LIMIT}"
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - "/opt/manageiq/container-scripts/sync-pv-data"
+ serviceAccount: miq-orchestrator
+ serviceAccountName: miq-orchestrator
+ terminationGracePeriodSeconds: 90
+ volumeClaimTemplates:
+ - metadata:
+ name: "${NAME}-server"
+ annotations:
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: "${APPLICATION_VOLUME_CAPACITY}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Headless service for ManageIQ backend pods
+ name: "${NAME}-backend"
+ spec:
+ clusterIP: None
+ selector:
+ name: "${NAME}-backend"
+- apiVersion: apps/v1beta1
+ kind: StatefulSet
+ metadata:
+ name: "${NAME}-backend"
+ annotations:
+ description: Defines how to deploy the ManageIQ appliance
+ spec:
+ serviceName: "${NAME}-backend"
+ replicas: 0
+ template:
+ metadata:
+ labels:
+ name: "${NAME}-backend"
+ name: "${NAME}-backend"
+ spec:
+ containers:
+ - name: manageiq
+ image: "${APPLICATION_IMG_NAME}:${BACKEND_APPLICATION_IMG_TAG}"
+ livenessProbe:
+ exec:
+ command:
+ - pidof
+ - MIQ Server
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ volumeMounts:
+ - name: "${NAME}-server"
+ mountPath: "/persistent"
+ env:
+ - name: APPLICATION_INIT_DELAY
+ value: "${APPLICATION_INIT_DELAY}"
+ - name: DATABASE_URL
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: database-url
+ - name: MIQ_SERVER_DEFAULT_ROLES
+ value: database_operations,event,reporting,scheduler,smartstate,ems_operations,ems_inventory,automate
+ - name: FRONTEND_SERVICE_NAME
+ value: "${NAME}"
+ - name: MEMCACHED_SERVER
+ value: "${MEMCACHED_SERVICE_NAME}:11211"
+ - name: V2_KEY
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: v2-key
+ - name: ANSIBLE_SERVICE_NAME
+ value: "${ANSIBLE_SERVICE_NAME}"
+ - name: ANSIBLE_ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: admin-password
+ resources:
+ requests:
+ memory: "${APPLICATION_MEM_REQ}"
+ cpu: "${APPLICATION_CPU_REQ}"
+ limits:
+ memory: "${APPLICATION_MEM_LIMIT}"
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - "/opt/manageiq/container-scripts/sync-pv-data"
+ serviceAccount: miq-orchestrator
+ serviceAccountName: miq-orchestrator
+ terminationGracePeriodSeconds: 90
+ volumeClaimTemplates:
+ - metadata:
+ name: "${NAME}-server"
+ annotations:
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: "${APPLICATION_VOLUME_CAPACITY}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ annotations:
+ description: Exposes the memcached server
+ spec:
+ ports:
+ - name: memcached
+ port: 11211
+ targetPort: 11211
+ selector:
+ name: "${MEMCACHED_SERVICE_NAME}"
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ annotations:
+ description: Defines how to deploy memcached
+ spec:
+ strategy:
+ type: Recreate
+ triggers:
+ - type: ConfigChange
+ replicas: 1
+ selector:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ labels:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ spec:
+ volumes: []
+ containers:
+ - name: memcached
+ image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}"
+ ports:
+ - containerPort: 11211
+ readinessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 5
+ tcpSocket:
+ port: 11211
+ livenessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 30
+ tcpSocket:
+ port: 11211
+ volumeMounts: []
+ env:
+ - name: MEMCACHED_MAX_MEMORY
+ value: "${MEMCACHED_MAX_MEMORY}"
+ - name: MEMCACHED_MAX_CONNECTIONS
+ value: "${MEMCACHED_MAX_CONNECTIONS}"
+ - name: MEMCACHED_SLAB_PAGE_SIZE
+ value: "${MEMCACHED_SLAB_PAGE_SIZE}"
+ resources:
+ requests:
+ memory: "${MEMCACHED_MEM_REQ}"
+ cpu: "${MEMCACHED_CPU_REQ}"
+ limits:
+ memory: "${MEMCACHED_MEM_LIMIT}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ annotations:
+ description: Remote database service
+ spec:
+ ports:
+ - name: postgresql
+ port: 5432
+ targetPort: "${{DATABASE_PORT}}"
+ selector: {}
+- apiVersion: v1
+ kind: Endpoints
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ subsets:
+ - addresses:
+ - ip: "${DATABASE_IP}"
+ ports:
+ - port: "${{DATABASE_PORT}}"
+ name: postgresql
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Exposes and load balances Ansible pods
+ service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"}]'
+ name: "${ANSIBLE_SERVICE_NAME}"
+ spec:
+ ports:
+ - name: http
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ - name: https
+ port: 443
+ protocol: TCP
+ targetPort: 443
+ selector:
+ name: "${ANSIBLE_SERVICE_NAME}"
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${ANSIBLE_SERVICE_NAME}"
+ annotations:
+ description: Defines how to deploy the Ansible appliance
+ spec:
+ strategy:
+ type: Recreate
+ serviceName: "${ANSIBLE_SERVICE_NAME}"
+ replicas: 0
+ template:
+ metadata:
+ labels:
+ name: "${ANSIBLE_SERVICE_NAME}"
+ name: "${ANSIBLE_SERVICE_NAME}"
+ spec:
+ containers:
+ - name: ansible
+ image: "${ANSIBLE_IMG_NAME}:${ANSIBLE_IMG_TAG}"
+ livenessProbe:
+ tcpSocket:
+ port: 443
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ readinessProbe:
+ httpGet:
+ path: "/"
+ port: 443
+ scheme: HTTPS
+ initialDelaySeconds: 200
+ timeoutSeconds: 3
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ - containerPort: 443
+ protocol: TCP
+ securityContext:
+ privileged: true
+ env:
+ - name: ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: admin-password
+ - name: RABBITMQ_USER_NAME
+ value: "${ANSIBLE_RABBITMQ_USER_NAME}"
+ - name: RABBITMQ_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: rabbit-password
+ - name: ANSIBLE_SECRET_KEY
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: secret-key
+ - name: DATABASE_SERVICE_NAME
+ value: "${DATABASE_SERVICE_NAME}"
+ - name: POSTGRESQL_USER
+ value: "${DATABASE_USER}"
+ - name: POSTGRESQL_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: pg-password
+ - name: POSTGRESQL_DATABASE
+ value: "${ANSIBLE_DATABASE_NAME}"
+ resources:
+ requests:
+ memory: "${ANSIBLE_MEM_REQ}"
+ cpu: "${ANSIBLE_CPU_REQ}"
+ limits:
+ memory: "${ANSIBLE_MEM_LIMIT}"
+ serviceAccount: miq-privileged
+ serviceAccountName: miq-privileged
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}-configs"
+ data:
+ application.conf: |
+ # Timeout: The number of seconds before receives and sends time out.
+ Timeout 120
+
+ RewriteEngine On
+ Options SymLinksIfOwnerMatch
+
+ <VirtualHost *:80>
+ KeepAlive on
+ ProxyPreserveHost on
+ ProxyPass /ws/ ws://${NAME}/ws/
+ ProxyPassReverse /ws/ ws://${NAME}/ws/
+ ProxyPass / http://${NAME}/
+ ProxyPassReverse / http://${NAME}/
+ </VirtualHost>
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ data:
+ auth-type: internal
+ auth-configuration.conf: |
+ # External Authentication Configuration File
+ #
+ # For details on usage please see https://github.com/ManageIQ/manageiq-pods/blob/master/README.md#configuring-external-authentication
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ annotations:
+ description: Exposes the httpd server
+ service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]'
+ spec:
+ ports:
+ - name: http
+ port: 80
+ targetPort: 80
+ selector:
+ name: "${HTTPD_SERVICE_NAME}"
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ annotations:
+ description: Defines how to deploy httpd
+ spec:
+ strategy:
+ type: Recreate
+ recreateParams:
+ timeoutSeconds: 1200
+ triggers:
+ - type: ConfigChange
+ replicas: 1
+ selector:
+ name: "${HTTPD_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ labels:
+ name: "${HTTPD_SERVICE_NAME}"
+ spec:
+ volumes:
+ - name: httpd-config
+ configMap:
+ name: "${HTTPD_SERVICE_NAME}-configs"
+ - name: httpd-auth-config
+ configMap:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ containers:
+ - name: httpd
+ image: "${HTTPD_IMG_NAME}:${HTTPD_IMG_TAG}"
+ ports:
+ - containerPort: 80
+ livenessProbe:
+ exec:
+ command:
+ - pidof
+ - httpd
+ initialDelaySeconds: 15
+ timeoutSeconds: 3
+ readinessProbe:
+ tcpSocket:
+ port: 80
+ initialDelaySeconds: 10
+ timeoutSeconds: 3
+ volumeMounts:
+ - name: httpd-config
+ mountPath: "${HTTPD_CONFIG_DIR}"
+ - name: httpd-auth-config
+ mountPath: "${HTTPD_AUTH_CONFIG_DIR}"
+ resources:
+ requests:
+ memory: "${HTTPD_MEM_REQ}"
+ cpu: "${HTTPD_CPU_REQ}"
+ limits:
+ memory: "${HTTPD_MEM_LIMIT}"
+ env:
+ - name: HTTPD_AUTH_TYPE
+ valueFrom:
+ configMapKeyRef:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ key: auth-type
+ lifecycle:
+ postStart:
+ exec:
+ command:
+ - "/usr/bin/save-container-environment"
+ serviceAccount: miq-anyuid
+ serviceAccountName: miq-anyuid
+parameters:
+- name: NAME
+ displayName: Name
+ required: true
+ description: The name assigned to all of the frontend objects defined in this template.
+ value: manageiq
+- name: V2_KEY
+ displayName: ManageIQ Encryption Key
+ required: true
+ description: Encryption Key for ManageIQ Passwords
+ from: "[a-zA-Z0-9]{43}"
+ generate: expression
+- name: DATABASE_SERVICE_NAME
+ displayName: PostgreSQL Service Name
+ required: true
+ description: The name of the OpenShift Service exposed for the PostgreSQL container.
+ value: postgresql
+- name: DATABASE_USER
+ displayName: PostgreSQL User
+ required: true
+ description: PostgreSQL user that will access the database.
+ value: root
+- name: DATABASE_PASSWORD
+ displayName: PostgreSQL Password
+ required: true
+ description: Password for the PostgreSQL user.
+ from: "[a-zA-Z0-9]{8}"
+ generate: expression
+- name: DATABASE_IP
+ displayName: PostgreSQL Server IP
+ required: true
+ description: PostgreSQL external server IP used to configure service.
+ value: ''
+- name: DATABASE_PORT
+ displayName: PostgreSQL Server Port
+ required: true
+ description: PostgreSQL external server port used to configure service.
+ value: '5432'
+- name: DATABASE_NAME
+ required: true
+ displayName: PostgreSQL Database Name
+ description: Name of the PostgreSQL database accessed.
+ value: vmdb_production
+- name: DATABASE_REGION
+ required: true
+ displayName: Application Database Region
+ description: Database region that will be used for the application.
+ value: '0'
+- name: ANSIBLE_DATABASE_NAME
+ displayName: Ansible PostgreSQL database name
+ required: true
+ description: The database to be used by the Ansible container
+ value: awx
+- name: MEMCACHED_SERVICE_NAME
+ required: true
+ displayName: Memcached Service Name
+ description: The name of the OpenShift Service exposed for the Memcached container.
+ value: memcached
+- name: MEMCACHED_MAX_MEMORY
+ displayName: Memcached Max Memory
+ description: Memcached maximum memory for memcached object storage in MB.
+ value: '64'
+- name: MEMCACHED_MAX_CONNECTIONS
+ displayName: Memcached Max Connections
+ description: Memcached maximum number of connections allowed.
+ value: '1024'
+- name: MEMCACHED_SLAB_PAGE_SIZE
+ displayName: Memcached Slab Page Size
+ description: Memcached size of each slab page.
+ value: 1m
+- name: ANSIBLE_SERVICE_NAME
+ displayName: Ansible Service Name
+ description: The name of the OpenShift Service exposed for the Ansible container.
+ value: ansible
+- name: ANSIBLE_ADMIN_PASSWORD
+ displayName: Ansible admin User password
+ required: true
+ description: The password for the Ansible container admin user
+ from: "[a-zA-Z0-9]{32}"
+ generate: expression
+- name: ANSIBLE_SECRET_KEY
+ displayName: Ansible Secret Key
+ required: true
+ description: Encryption key for the Ansible container
+ from: "[a-f0-9]{32}"
+ generate: expression
+- name: ANSIBLE_RABBITMQ_USER_NAME
+ displayName: RabbitMQ Username
+ required: true
+ description: Username for the Ansible RabbitMQ Server
+ value: ansible
+- name: ANSIBLE_RABBITMQ_PASSWORD
+ displayName: RabbitMQ Server Password
+ required: true
+ description: Password for the Ansible RabbitMQ Server
+ from: "[a-zA-Z0-9]{32}"
+ generate: expression
+- name: APPLICATION_CPU_REQ
+ displayName: Application Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Application container will need (expressed in millicores).
+ value: 1000m
+- name: MEMCACHED_CPU_REQ
+ displayName: Memcached Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Memcached container will need (expressed in millicores).
+ value: 200m
+- name: ANSIBLE_CPU_REQ
+ displayName: Ansible Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Ansible container will need (expressed in millicores).
+ value: 1000m
+- name: APPLICATION_MEM_REQ
+ displayName: Application Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Application container will need.
+ value: 6144Mi
+- name: MEMCACHED_MEM_REQ
+ displayName: Memcached Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Memcached container will need.
+ value: 64Mi
+- name: ANSIBLE_MEM_REQ
+ displayName: Ansible Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Ansible container will need.
+ value: 2048Mi
+- name: APPLICATION_MEM_LIMIT
+ displayName: Application Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Application container can consume.
+ value: 16384Mi
+- name: MEMCACHED_MEM_LIMIT
+ displayName: Memcached Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Memcached container can consume.
+ value: 256Mi
+- name: ANSIBLE_MEM_LIMIT
+ displayName: Ansible Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Ansible container can consume.
+ value: 8096Mi
+- name: MEMCACHED_IMG_NAME
+ displayName: Memcached Image Name
+ description: This is the Memcached image name requested to deploy.
+ value: docker.io/manageiq/memcached
+- name: MEMCACHED_IMG_TAG
+ displayName: Memcached Image Tag
+ description: This is the Memcached image tag/version requested to deploy.
+ value: latest
+- name: APPLICATION_IMG_NAME
+ displayName: Application Image Name
+ description: This is the Application image name requested to deploy.
+ value: docker.io/manageiq/manageiq-pods
+- name: FRONTEND_APPLICATION_IMG_TAG
+ displayName: Frontend Application Image Tag
+ description: This is the ManageIQ Frontend Application image tag/version requested to deploy.
+ value: frontend-latest
+- name: BACKEND_APPLICATION_IMG_TAG
+ displayName: Backend Application Image Tag
+ description: This is the ManageIQ Backend Application image tag/version requested to deploy.
+ value: backend-latest
+- name: ANSIBLE_IMG_NAME
+ displayName: Ansible Image Name
+ description: This is the Ansible image name requested to deploy.
+ value: docker.io/manageiq/embedded-ansible
+- name: ANSIBLE_IMG_TAG
+ displayName: Ansible Image Tag
+ description: This is the Ansible image tag/version requested to deploy.
+ value: latest
+- name: APPLICATION_DOMAIN
+ displayName: Application Hostname
+ description: The exposed hostname that will route to the application service. If left blank, a value will be generated automatically.
+ value: ''
+- name: APPLICATION_REPLICA_COUNT
+ displayName: Application Replica Count
+ description: This is the number of Application replicas requested to deploy.
+ value: '1'
+- name: APPLICATION_INIT_DELAY
+ displayName: Application Init Delay
+ required: true
+ description: Delay in seconds before we attempt to initialize the application.
+ value: '15'
+- name: APPLICATION_VOLUME_CAPACITY
+ displayName: Application Volume Capacity
+ required: true
+ description: Volume space available for application data.
+ value: 5Gi
+- name: HTTPD_SERVICE_NAME
+ required: true
+ displayName: Apache httpd Service Name
+ description: The name of the OpenShift Service exposed for the httpd container.
+ value: httpd
+- name: HTTPD_IMG_NAME
+ displayName: Apache httpd Image Name
+ description: This is the httpd image name requested to deploy.
+ value: docker.io/manageiq/httpd
+- name: HTTPD_IMG_TAG
+ displayName: Apache httpd Image Tag
+ description: This is the httpd image tag/version requested to deploy.
+ value: latest
+- name: HTTPD_CONFIG_DIR
+ displayName: Apache httpd Configuration Directory
+ description: Directory used to store the Apache configuration files.
+ value: "/etc/httpd/conf.d"
+- name: HTTPD_AUTH_CONFIG_DIR
+ displayName: External Authentication Configuration Directory
+ description: Directory used to store the external authentication configuration files.
+ value: "/etc/httpd/auth-conf.d"
+- name: HTTPD_CPU_REQ
+ displayName: Apache httpd Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the httpd container will need (expressed in millicores).
+ value: 500m
+- name: HTTPD_MEM_REQ
+ displayName: Apache httpd Min RAM Requested
+ required: true
+ description: Minimum amount of memory the httpd container will need.
+ value: 512Mi
+- name: HTTPD_MEM_LIMIT
+ displayName: Apache httpd Max RAM Limit
+ required: true
+ description: Maximum amount of memory the httpd container can consume.
+ value: 8192Mi
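
Worth noting in the template above: the ${DATABASE_SERVICE_NAME} Service is declared with an empty selector and paired with a hand-built Endpoints object, which is the standard Kubernetes pattern for giving an external database an in-cluster DNS name. The ${{DATABASE_PORT}} syntax is OpenShift's non-string parameter substitution, needed because port fields must be integers. A sketch of what the pair resolves to, with illustrative values (DATABASE_IP=10.1.2.3, defaults elsewhere):

    # Illustrative rendered objects: traffic to postgresql:5432 inside the
    # cluster is forwarded to the external host 10.1.2.3
    apiVersion: v1
    kind: Service
    metadata:
      name: postgresql
    spec:
      ports:
      - name: postgresql
        port: 5432
        targetPort: 5432
      selector: {}
    ---
    apiVersion: v1
    kind: Endpoints
    metadata:
      name: postgresql
    subsets:
    - addresses:
      - ip: 10.1.2.3
      ports:
      - port: 5432
        name: postgresql
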
diff --git a/roles/openshift_management/files/templates/manageiq/miq-template.yaml b/roles/openshift_management/files/templates/manageiq/miq-template.yaml
new file mode 100644
index 000000000..3f5a12205
--- /dev/null
+++ b/roles/openshift_management/files/templates/manageiq/miq-template.yaml
@@ -0,0 +1,948 @@
+apiVersion: v1
+kind: Template
+labels:
+ template: manageiq
+metadata:
+ name: manageiq
+ annotations:
+ description: ManageIQ appliance with persistent storage
+ tags: instant-app,manageiq,miq
+ iconClass: icon-rails
+objects:
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: miq-orchestrator
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: miq-anyuid
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: miq-privileged
+- apiVersion: v1
+ kind: ServiceAccount
+ metadata:
+ name: miq-httpd
+- apiVersion: v1
+ kind: Secret
+ metadata:
+ name: "${NAME}-secrets"
+ stringData:
+ pg-password: "${DATABASE_PASSWORD}"
+ database-url: postgresql://${DATABASE_USER}:${DATABASE_PASSWORD}@${DATABASE_SERVICE_NAME}/${DATABASE_NAME}?encoding=utf8&pool=5&wait_timeout=5
+ v2-key: "${V2_KEY}"
+- apiVersion: v1
+ kind: Secret
+ metadata:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ stringData:
+ rabbit-password: "${ANSIBLE_RABBITMQ_PASSWORD}"
+ secret-key: "${ANSIBLE_SECRET_KEY}"
+ admin-password: "${ANSIBLE_ADMIN_PASSWORD}"
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}-configs"
+ data:
+ 01_miq_overrides.conf: |
+ #------------------------------------------------------------------------------
+ # CONNECTIONS AND AUTHENTICATION
+ #------------------------------------------------------------------------------
+
+ tcp_keepalives_count = 9
+ tcp_keepalives_idle = 3
+ tcp_keepalives_interval = 75
+
+ #------------------------------------------------------------------------------
+ # RESOURCE USAGE (except WAL)
+ #------------------------------------------------------------------------------
+
+ shared_preload_libraries = 'pglogical,repmgr_funcs'
+ max_worker_processes = 10
+
+ #------------------------------------------------------------------------------
+ # WRITE AHEAD LOG
+ #------------------------------------------------------------------------------
+
+ wal_level = 'logical'
+ wal_log_hints = on
+ wal_buffers = 16MB
+ checkpoint_completion_target = 0.9
+
+ #------------------------------------------------------------------------------
+ # REPLICATION
+ #------------------------------------------------------------------------------
+
+ max_wal_senders = 10
+ wal_sender_timeout = 0
+ max_replication_slots = 10
+ hot_standby = on
+
+ #------------------------------------------------------------------------------
+ # ERROR REPORTING AND LOGGING
+ #------------------------------------------------------------------------------
+
+ log_filename = 'postgresql.log'
+ log_rotation_age = 0
+ log_min_duration_statement = 5000
+ log_connections = on
+ log_disconnections = on
+ log_line_prefix = '%t:%r:%c:%u@%d:[%p]:'
+ log_lock_waits = on
+
+ #------------------------------------------------------------------------------
+ # AUTOVACUUM PARAMETERS
+ #------------------------------------------------------------------------------
+
+ log_autovacuum_min_duration = 0
+ autovacuum_naptime = 5min
+ autovacuum_vacuum_threshold = 500
+ autovacuum_analyze_threshold = 500
+ autovacuum_vacuum_scale_factor = 0.05
+
+ #------------------------------------------------------------------------------
+ # LOCK MANAGEMENT
+ #------------------------------------------------------------------------------
+
+ deadlock_timeout = 5s
+
+ #------------------------------------------------------------------------------
+ # VERSION/PLATFORM COMPATIBILITY
+ #------------------------------------------------------------------------------
+
+ escape_string_warning = off
+ standard_conforming_strings = off
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}-configs"
+ data:
+ application.conf: |
+ # Timeout: The number of seconds before receives and sends time out.
+ Timeout 120
+
+ RewriteEngine On
+ Options SymLinksIfOwnerMatch
+
+ <VirtualHost *:80>
+ KeepAlive on
+ ProxyPreserveHost on
+ ProxyPass /ws/ ws://${NAME}/ws/
+ ProxyPassReverse /ws/ ws://${NAME}/ws/
+ ProxyPass / http://${NAME}/
+ ProxyPassReverse / http://${NAME}/
+ </VirtualHost>
+- apiVersion: v1
+ kind: ConfigMap
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ data:
+ auth-type: internal
+ auth-configuration.conf: |
+ # External Authentication Configuration File
+ #
+ # For details on usage please see https://github.com/ManageIQ/manageiq-pods/blob/master/README.md#configuring-external-authentication
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Exposes and load balances ManageIQ pods
+ service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"},{"name":"${MEMCACHED_SERVICE_NAME}","namespace":"","kind":"Service"}]'
+ name: "${NAME}"
+ spec:
+ clusterIP: None
+ ports:
+ - name: http
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ selector:
+ name: "${NAME}"
+- apiVersion: v1
+ kind: Route
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ spec:
+ host: "${APPLICATION_DOMAIN}"
+ port:
+ targetPort: http
+ tls:
+ termination: edge
+ insecureEdgeTerminationPolicy: Redirect
+ to:
+ kind: Service
+ name: "${HTTPD_SERVICE_NAME}"
+- apiVersion: v1
+ kind: PersistentVolumeClaim
+ metadata:
+ name: "${NAME}-${DATABASE_SERVICE_NAME}"
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: "${DATABASE_VOLUME_CAPACITY}"
+- apiVersion: apps/v1beta1
+ kind: StatefulSet
+ metadata:
+ name: "${NAME}"
+ annotations:
+ description: Defines how to deploy the ManageIQ appliance
+ spec:
+ serviceName: "${NAME}"
+ replicas: "${APPLICATION_REPLICA_COUNT}"
+ template:
+ metadata:
+ labels:
+ name: "${NAME}"
+ name: "${NAME}"
+ spec:
+ containers:
+ - name: manageiq
+ image: "${APPLICATION_IMG_NAME}:${FRONTEND_APPLICATION_IMG_TAG}"
+ livenessProbe:
+ tcpSocket:
+ port: 80
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ readinessProbe:
+ httpGet:
+ path: "/"
+ port: 80
+ scheme: HTTP
+ initialDelaySeconds: 200
+ timeoutSeconds: 3
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ volumeMounts:
+ - name: "${NAME}-server"
+ mountPath: "/persistent"
+ env:
+ - name: MY_POD_NAMESPACE
+ valueFrom:
+ fieldRef:
+ fieldPath: metadata.namespace
+ - name: APPLICATION_INIT_DELAY
+ value: "${APPLICATION_INIT_DELAY}"
+ - name: DATABASE_SERVICE_NAME
+ value: "${DATABASE_SERVICE_NAME}"
+ - name: DATABASE_REGION
+ value: "${DATABASE_REGION}"
+ - name: DATABASE_URL
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: database-url
+ - name: MEMCACHED_SERVER
+ value: "${MEMCACHED_SERVICE_NAME}:11211"
+ - name: MEMCACHED_SERVICE_NAME
+ value: "${MEMCACHED_SERVICE_NAME}"
+ - name: V2_KEY
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: v2-key
+ - name: ANSIBLE_SERVICE_NAME
+ value: "${ANSIBLE_SERVICE_NAME}"
+ - name: ANSIBLE_ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: admin-password
+ resources:
+ requests:
+ memory: "${APPLICATION_MEM_REQ}"
+ cpu: "${APPLICATION_CPU_REQ}"
+ limits:
+ memory: "${APPLICATION_MEM_LIMIT}"
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - "/opt/manageiq/container-scripts/sync-pv-data"
+ serviceAccount: miq-orchestrator
+ serviceAccountName: miq-orchestrator
+ terminationGracePeriodSeconds: 90
+ volumeClaimTemplates:
+ - metadata:
+ name: "${NAME}-server"
+ annotations:
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: "${APPLICATION_VOLUME_CAPACITY}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Headless service for ManageIQ backend pods
+ name: "${NAME}-backend"
+ spec:
+ clusterIP: None
+ selector:
+ name: "${NAME}-backend"
+- apiVersion: apps/v1beta1
+ kind: StatefulSet
+ metadata:
+ name: "${NAME}-backend"
+ annotations:
+ description: Defines how to deploy the ManageIQ appliance
+ spec:
+ serviceName: "${NAME}-backend"
+ replicas: 0
+ template:
+ metadata:
+ labels:
+ name: "${NAME}-backend"
+ name: "${NAME}-backend"
+ spec:
+ containers:
+ - name: manageiq
+ image: "${APPLICATION_IMG_NAME}:${BACKEND_APPLICATION_IMG_TAG}"
+ livenessProbe:
+ exec:
+ command:
+ - pidof
+ - MIQ Server
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ volumeMounts:
+ - name: "${NAME}-server"
+ mountPath: "/persistent"
+ env:
+ - name: APPLICATION_INIT_DELAY
+ value: "${APPLICATION_INIT_DELAY}"
+ - name: DATABASE_URL
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: database-url
+ - name: MIQ_SERVER_DEFAULT_ROLES
+ value: database_operations,event,reporting,scheduler,smartstate,ems_operations,ems_inventory,automate
+ - name: FRONTEND_SERVICE_NAME
+ value: "${NAME}"
+ - name: MEMCACHED_SERVER
+ value: "${MEMCACHED_SERVICE_NAME}:11211"
+ - name: V2_KEY
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: v2-key
+ - name: ANSIBLE_SERVICE_NAME
+ value: "${ANSIBLE_SERVICE_NAME}"
+ - name: ANSIBLE_ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: admin-password
+ resources:
+ requests:
+ memory: "${APPLICATION_MEM_REQ}"
+ cpu: "${APPLICATION_CPU_REQ}"
+ limits:
+ memory: "${APPLICATION_MEM_LIMIT}"
+ lifecycle:
+ preStop:
+ exec:
+ command:
+ - "/opt/manageiq/container-scripts/sync-pv-data"
+ serviceAccount: miq-orchestrator
+ serviceAccountName: miq-orchestrator
+ terminationGracePeriodSeconds: 90
+ volumeClaimTemplates:
+ - metadata:
+ name: "${NAME}-server"
+ annotations:
+ spec:
+ accessModes:
+ - ReadWriteOnce
+ resources:
+ requests:
+ storage: "${APPLICATION_VOLUME_CAPACITY}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ annotations:
+ description: Exposes the memcached server
+ spec:
+ ports:
+ - name: memcached
+ port: 11211
+ targetPort: 11211
+ selector:
+ name: "${MEMCACHED_SERVICE_NAME}"
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ annotations:
+ description: Defines how to deploy memcached
+ spec:
+ strategy:
+ type: Recreate
+ triggers:
+ - type: ConfigChange
+ replicas: 1
+ selector:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ labels:
+ name: "${MEMCACHED_SERVICE_NAME}"
+ spec:
+ volumes: []
+ containers:
+ - name: memcached
+ image: "${MEMCACHED_IMG_NAME}:${MEMCACHED_IMG_TAG}"
+ ports:
+ - containerPort: 11211
+ readinessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 5
+ tcpSocket:
+ port: 11211
+ livenessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 30
+ tcpSocket:
+ port: 11211
+ volumeMounts: []
+ env:
+ - name: MEMCACHED_MAX_MEMORY
+ value: "${MEMCACHED_MAX_MEMORY}"
+ - name: MEMCACHED_MAX_CONNECTIONS
+ value: "${MEMCACHED_MAX_CONNECTIONS}"
+ - name: MEMCACHED_SLAB_PAGE_SIZE
+ value: "${MEMCACHED_SLAB_PAGE_SIZE}"
+ resources:
+ requests:
+ memory: "${MEMCACHED_MEM_REQ}"
+ cpu: "${MEMCACHED_CPU_REQ}"
+ limits:
+ memory: "${MEMCACHED_MEM_LIMIT}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ annotations:
+ description: Exposes the database server
+ spec:
+ ports:
+ - name: postgresql
+ port: 5432
+ targetPort: 5432
+ selector:
+ name: "${DATABASE_SERVICE_NAME}"
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ annotations:
+ description: Defines how to deploy the database
+ spec:
+ strategy:
+ type: Recreate
+ triggers:
+ - type: ConfigChange
+ replicas: 1
+ selector:
+ name: "${DATABASE_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${DATABASE_SERVICE_NAME}"
+ labels:
+ name: "${DATABASE_SERVICE_NAME}"
+ spec:
+ volumes:
+ - name: miq-pgdb-volume
+ persistentVolumeClaim:
+ claimName: "${NAME}-${DATABASE_SERVICE_NAME}"
+ - name: miq-pg-configs
+ configMap:
+ name: "${DATABASE_SERVICE_NAME}-configs"
+ containers:
+ - name: postgresql
+ image: "${POSTGRESQL_IMG_NAME}:${POSTGRESQL_IMG_TAG}"
+ ports:
+ - containerPort: 5432
+ readinessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 15
+ exec:
+ command:
+ - "/bin/sh"
+ - "-i"
+ - "-c"
+ - psql -h 127.0.0.1 -U ${POSTGRESQL_USER} -q -d ${POSTGRESQL_DATABASE} -c 'SELECT 1'
+ livenessProbe:
+ timeoutSeconds: 1
+ initialDelaySeconds: 60
+ tcpSocket:
+ port: 5432
+ volumeMounts:
+ - name: miq-pgdb-volume
+ mountPath: "/var/lib/pgsql/data"
+ - name: miq-pg-configs
+ mountPath: "${POSTGRESQL_CONFIG_DIR}"
+ env:
+ - name: POSTGRESQL_USER
+ value: "${DATABASE_USER}"
+ - name: POSTGRESQL_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: pg-password
+ - name: POSTGRESQL_DATABASE
+ value: "${DATABASE_NAME}"
+ - name: POSTGRESQL_MAX_CONNECTIONS
+ value: "${POSTGRESQL_MAX_CONNECTIONS}"
+ - name: POSTGRESQL_SHARED_BUFFERS
+ value: "${POSTGRESQL_SHARED_BUFFERS}"
+ - name: POSTGRESQL_CONFIG_DIR
+ value: "${POSTGRESQL_CONFIG_DIR}"
+ resources:
+ requests:
+ memory: "${POSTGRESQL_MEM_REQ}"
+ cpu: "${POSTGRESQL_CPU_REQ}"
+ limits:
+ memory: "${POSTGRESQL_MEM_LIMIT}"
+- apiVersion: v1
+ kind: Service
+ metadata:
+ annotations:
+ description: Exposes and load balances Ansible pods
+ service.alpha.openshift.io/dependencies: '[{"name":"${DATABASE_SERVICE_NAME}","namespace":"","kind":"Service"}]'
+ name: "${ANSIBLE_SERVICE_NAME}"
+ spec:
+ ports:
+ - name: http
+ port: 80
+ protocol: TCP
+ targetPort: 80
+ - name: https
+ port: 443
+ protocol: TCP
+ targetPort: 443
+ selector:
+ name: "${ANSIBLE_SERVICE_NAME}"
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${ANSIBLE_SERVICE_NAME}"
+ annotations:
+ description: Defines how to deploy the Ansible appliance
+ spec:
+ strategy:
+ type: Recreate
+ serviceName: "${ANSIBLE_SERVICE_NAME}"
+ replicas: 0
+ template:
+ metadata:
+ labels:
+ name: "${ANSIBLE_SERVICE_NAME}"
+ name: "${ANSIBLE_SERVICE_NAME}"
+ spec:
+ containers:
+ - name: ansible
+ image: "${ANSIBLE_IMG_NAME}:${ANSIBLE_IMG_TAG}"
+ livenessProbe:
+ tcpSocket:
+ port: 443
+ initialDelaySeconds: 480
+ timeoutSeconds: 3
+ readinessProbe:
+ httpGet:
+ path: "/"
+ port: 443
+ scheme: HTTPS
+ initialDelaySeconds: 200
+ timeoutSeconds: 3
+ ports:
+ - containerPort: 80
+ protocol: TCP
+ - containerPort: 443
+ protocol: TCP
+ securityContext:
+ privileged: true
+ env:
+ - name: ADMIN_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: admin-password
+ - name: RABBITMQ_USER_NAME
+ value: "${ANSIBLE_RABBITMQ_USER_NAME}"
+ - name: RABBITMQ_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: rabbit-password
+ - name: ANSIBLE_SECRET_KEY
+ valueFrom:
+ secretKeyRef:
+ name: "${ANSIBLE_SERVICE_NAME}-secrets"
+ key: secret-key
+ - name: DATABASE_SERVICE_NAME
+ value: "${DATABASE_SERVICE_NAME}"
+ - name: POSTGRESQL_USER
+ value: "${DATABASE_USER}"
+ - name: POSTGRESQL_PASSWORD
+ valueFrom:
+ secretKeyRef:
+ name: "${NAME}-secrets"
+ key: pg-password
+ - name: POSTGRESQL_DATABASE
+ value: "${ANSIBLE_DATABASE_NAME}"
+ resources:
+ requests:
+ memory: "${ANSIBLE_MEM_REQ}"
+ cpu: "${ANSIBLE_CPU_REQ}"
+ limits:
+ memory: "${ANSIBLE_MEM_LIMIT}"
+ serviceAccount: miq-privileged
+ serviceAccountName: miq-privileged
+- apiVersion: v1
+ kind: Service
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ annotations:
+ description: Exposes the httpd server
+ service.alpha.openshift.io/dependencies: '[{"name":"${NAME}","namespace":"","kind":"Service"}]'
+ spec:
+ ports:
+ - name: http
+ port: 80
+ targetPort: 80
+ selector:
+ name: "${HTTPD_SERVICE_NAME}"
+- apiVersion: v1
+ kind: DeploymentConfig
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ annotations:
+ description: Defines how to deploy httpd
+ spec:
+ strategy:
+ type: Recreate
+ recreateParams:
+ timeoutSeconds: 1200
+ triggers:
+ - type: ConfigChange
+ replicas: 1
+ selector:
+ name: "${HTTPD_SERVICE_NAME}"
+ template:
+ metadata:
+ name: "${HTTPD_SERVICE_NAME}"
+ labels:
+ name: "${HTTPD_SERVICE_NAME}"
+ spec:
+ volumes:
+ - name: httpd-config
+ configMap:
+ name: "${HTTPD_SERVICE_NAME}-configs"
+ - name: httpd-auth-config
+ configMap:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ containers:
+ - name: httpd
+ image: "${HTTPD_IMG_NAME}:${HTTPD_IMG_TAG}"
+ ports:
+ - containerPort: 80
+ livenessProbe:
+ exec:
+ command:
+ - pidof
+ - httpd
+ initialDelaySeconds: 15
+ timeoutSeconds: 3
+ readinessProbe:
+ tcpSocket:
+ port: 80
+ initialDelaySeconds: 10
+ timeoutSeconds: 3
+ volumeMounts:
+ - name: httpd-config
+ mountPath: "${HTTPD_CONFIG_DIR}"
+ - name: httpd-auth-config
+ mountPath: "${HTTPD_AUTH_CONFIG_DIR}"
+ resources:
+ requests:
+ memory: "${HTTPD_MEM_REQ}"
+ cpu: "${HTTPD_CPU_REQ}"
+ limits:
+ memory: "${HTTPD_MEM_LIMIT}"
+ env:
+ - name: HTTPD_AUTH_TYPE
+ valueFrom:
+ configMapKeyRef:
+ name: "${HTTPD_SERVICE_NAME}-auth-configs"
+ key: auth-type
+ lifecycle:
+ postStart:
+ exec:
+ command:
+ - "/usr/bin/save-container-environment"
+ serviceAccount: miq-anyuid
+ serviceAccountName: miq-anyuid
+parameters:
+- name: NAME
+ displayName: Name
+ required: true
+ description: The name assigned to all of the frontend objects defined in this template.
+ value: manageiq
+- name: V2_KEY
+ displayName: ManageIQ Encryption Key
+ required: true
+ description: Encryption Key for ManageIQ Passwords
+ from: "[a-zA-Z0-9]{43}"
+ generate: expression
+- name: DATABASE_SERVICE_NAME
+ displayName: PostgreSQL Service Name
+ required: true
+ description: The name of the OpenShift Service exposed for the PostgreSQL container.
+ value: postgresql
+- name: DATABASE_USER
+ displayName: PostgreSQL User
+ required: true
+ description: PostgreSQL user that will access the database.
+ value: root
+- name: DATABASE_PASSWORD
+ displayName: PostgreSQL Password
+ required: true
+ description: Password for the PostgreSQL user.
+ from: "[a-zA-Z0-9]{8}"
+ generate: expression
+- name: DATABASE_NAME
+ required: true
+ displayName: PostgreSQL Database Name
+ description: Name of the PostgreSQL database accessed.
+ value: vmdb_production
+- name: DATABASE_REGION
+ required: true
+ displayName: Application Database Region
+ description: Database region that will be used for the application.
+ value: '0'
+- name: ANSIBLE_DATABASE_NAME
+ displayName: Ansible PostgreSQL database name
+ required: true
+ description: The database to be used by the Ansible container
+ value: awx
+- name: MEMCACHED_SERVICE_NAME
+ required: true
+ displayName: Memcached Service Name
+ description: The name of the OpenShift Service exposed for the Memcached container.
+ value: memcached
+- name: MEMCACHED_MAX_MEMORY
+ displayName: Memcached Max Memory
+ description: Memcached maximum memory for memcached object storage in MB.
+ value: '64'
+- name: MEMCACHED_MAX_CONNECTIONS
+ displayName: Memcached Max Connections
+ description: Memcached maximum number of connections allowed.
+ value: '1024'
+- name: MEMCACHED_SLAB_PAGE_SIZE
+ displayName: Memcached Slab Page Size
+ description: Memcached size of each slab page.
+ value: 1m
+- name: POSTGRESQL_CONFIG_DIR
+ displayName: PostgreSQL Configuration Overrides
+ description: Directory used to store PostgreSQL configuration overrides.
+ value: "/var/lib/pgsql/conf.d"
+- name: POSTGRESQL_MAX_CONNECTIONS
+ displayName: PostgreSQL Max Connections
+ description: PostgreSQL maximum number of database connections allowed.
+ value: '1000'
+- name: POSTGRESQL_SHARED_BUFFERS
+ displayName: PostgreSQL Shared Buffer Amount
+ description: Amount of memory dedicated for PostgreSQL shared memory buffers.
+ value: 1GB
+- name: ANSIBLE_SERVICE_NAME
+ displayName: Ansible Service Name
+ description: The name of the OpenShift Service exposed for the Ansible container.
+ value: ansible
+- name: ANSIBLE_ADMIN_PASSWORD
+ displayName: Ansible admin User password
+ required: true
+ description: The password for the Ansible container admin user
+ from: "[a-zA-Z0-9]{32}"
+ generate: expression
+- name: ANSIBLE_SECRET_KEY
+ displayName: Ansible Secret Key
+ required: true
+ description: Encryption key for the Ansible container
+ from: "[a-f0-9]{32}"
+ generate: expression
+- name: ANSIBLE_RABBITMQ_USER_NAME
+ displayName: RabbitMQ Username
+ required: true
+ description: Username for the Ansible RabbitMQ Server
+ value: ansible
+- name: ANSIBLE_RABBITMQ_PASSWORD
+ displayName: RabbitMQ Server Password
+ required: true
+ description: Password for the Ansible RabbitMQ Server
+ from: "[a-zA-Z0-9]{32}"
+ generate: expression
+- name: APPLICATION_CPU_REQ
+ displayName: Application Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Application container will need (expressed in millicores).
+ value: 1000m
+- name: POSTGRESQL_CPU_REQ
+ displayName: PostgreSQL Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the PostgreSQL container will need (expressed in millicores).
+ value: 500m
+- name: MEMCACHED_CPU_REQ
+ displayName: Memcached Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Memcached container will need (expressed in millicores).
+ value: 200m
+- name: ANSIBLE_CPU_REQ
+ displayName: Ansible Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the Ansible container will need (expressed in millicores).
+ value: 1000m
+- name: APPLICATION_MEM_REQ
+ displayName: Application Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Application container will need.
+ value: 6144Mi
+- name: POSTGRESQL_MEM_REQ
+ displayName: PostgreSQL Min RAM Requested
+ required: true
+ description: Minimum amount of memory the PostgreSQL container will need.
+ value: 4Gi
+- name: MEMCACHED_MEM_REQ
+ displayName: Memcached Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Memcached container will need.
+ value: 64Mi
+- name: ANSIBLE_MEM_REQ
+ displayName: Ansible Min RAM Requested
+ required: true
+ description: Minimum amount of memory the Ansible container will need.
+ value: 2048Mi
+- name: APPLICATION_MEM_LIMIT
+ displayName: Application Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Application container can consume.
+ value: 16384Mi
+- name: POSTGRESQL_MEM_LIMIT
+ displayName: PostgreSQL Max RAM Limit
+ required: true
+ description: Maximum amount of memory the PostgreSQL container can consume.
+ value: 8Gi
+- name: MEMCACHED_MEM_LIMIT
+ displayName: Memcached Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Memcached container can consume.
+ value: 256Mi
+- name: ANSIBLE_MEM_LIMIT
+ displayName: Ansible Max RAM Limit
+ required: true
+ description: Maximum amount of memory the Ansible container can consume.
+ value: 8096Mi
+- name: POSTGRESQL_IMG_NAME
+ displayName: PostgreSQL Image Name
+ description: This is the PostgreSQL image name requested to deploy.
+ value: docker.io/manageiq/postgresql
+- name: POSTGRESQL_IMG_TAG
+ displayName: PostgreSQL Image Tag
+ description: This is the PostgreSQL image tag/version requested to deploy.
+ value: latest
+- name: MEMCACHED_IMG_NAME
+ displayName: Memcached Image Name
+ description: This is the Memcached image name requested to deploy.
+ value: docker.io/manageiq/memcached
+- name: MEMCACHED_IMG_TAG
+ displayName: Memcached Image Tag
+ description: This is the Memcached image tag/version requested to deploy.
+ value: latest
+- name: APPLICATION_IMG_NAME
+ displayName: Application Image Name
+ description: This is the Application image name requested to deploy.
+ value: docker.io/manageiq/manageiq-pods
+- name: FRONTEND_APPLICATION_IMG_TAG
+ displayName: Frontend Application Image Tag
+ description: This is the ManageIQ Frontend Application image tag/version requested to deploy.
+ value: frontend-latest
+- name: BACKEND_APPLICATION_IMG_TAG
+ displayName: Backend Application Image Tag
+ description: This is the ManageIQ Backend Application image tag/version requested to deploy.
+ value: backend-latest
+- name: ANSIBLE_IMG_NAME
+ displayName: Ansible Image Name
+ description: This is the Ansible image name requested to deploy.
+ value: docker.io/manageiq/embedded-ansible
+- name: ANSIBLE_IMG_TAG
+ displayName: Ansible Image Tag
+ description: This is the Ansible image tag/version requested to deploy.
+ value: latest
+- name: APPLICATION_DOMAIN
+ displayName: Application Hostname
+ description: The exposed hostname that will route to the application service. If left blank, a value will be generated automatically.
+ value: ''
+- name: APPLICATION_REPLICA_COUNT
+ displayName: Application Replica Count
+ description: This is the number of Application replicas requested to deploy.
+ value: '1'
+- name: APPLICATION_INIT_DELAY
+ displayName: Application Init Delay
+ required: true
+ description: Delay in seconds before we attempt to initialize the application.
+ value: '15'
+- name: APPLICATION_VOLUME_CAPACITY
+ displayName: Application Volume Capacity
+ required: true
+ description: Volume space available for application data.
+ value: 5Gi
+- name: DATABASE_VOLUME_CAPACITY
+ displayName: Database Volume Capacity
+ required: true
+ description: Volume space available for database.
+ value: 15Gi
+- name: HTTPD_SERVICE_NAME
+ required: true
+ displayName: Apache httpd Service Name
+ description: The name of the OpenShift Service exposed for the httpd container.
+ value: httpd
+- name: HTTPD_IMG_NAME
+ displayName: Apache httpd Image Name
+ description: This is the httpd image name requested to deploy.
+ value: docker.io/manageiq/httpd
+- name: HTTPD_IMG_TAG
+ displayName: Apache httpd Image Tag
+ description: This is the httpd image tag/version requested to deploy.
+ value: latest
+- name: HTTPD_CONFIG_DIR
+ displayName: Apache httpd Configuration Directory
+ description: Directory used to store the Apache configuration files.
+ value: "/etc/httpd/conf.d"
+- name: HTTPD_AUTH_CONFIG_DIR
+ displayName: External Authentication Configuration Directory
+ description: Directory used to store the external authentication configuration files.
+ value: "/etc/httpd/auth-conf.d"
+- name: HTTPD_CPU_REQ
+ displayName: Apache httpd Min CPU Requested
+ required: true
+ description: Minimum amount of CPU time the httpd container will need (expressed in millicores).
+ value: 500m
+- name: HTTPD_MEM_REQ
+ displayName: Apache httpd Min RAM Requested
+ required: true
+ description: Minimum amount of memory the httpd container will need.
+ value: 512Mi
+- name: HTTPD_MEM_LIMIT
+ displayName: Apache httpd Max RAM Limit
+ required: true
+ description: Maximum amount of memory the httpd container can consume.
+ value: 8192Mi
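
Note that both templates deliberately ship the backend StatefulSet and the Ansible DeploymentConfig with replicas: 0; they are meant to be scaled up only after the frontend has initialized the database region. A minimal sketch, assuming the default NAME of manageiq and an illustrative namespace:

    # Hypothetical example: add a backend worker once the frontend is up
    - name: Scale up the ManageIQ backend StatefulSet
      command: oc scale statefulset manageiq-backend --replicas=1 -n openshift-management
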
diff --git a/roles/openshift_management/filter_plugins/oo_management_filters.py b/roles/openshift_management/filter_plugins/oo_management_filters.py
new file mode 100644
index 000000000..3b7013d9a
--- /dev/null
+++ b/roles/openshift_management/filter_plugins/oo_management_filters.py
@@ -0,0 +1,32 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+"""
+Filter methods for the management role
+"""
+
+
+def oo_filter_container_providers(results):
+    """results - the result from posting the API calls for adding new
+    providers"""
+    all_results = []
+    for result in results:
+        if 'results' in result['json']:
+            # We got an OK response
+            res = result['json']['results'][0]
+            all_results.append("Provider '{}' - Added successfully".format(res['name']))
+        elif 'error' in result['json']:
+            # This was a problem
+            all_results.append("Provider '{}' - Failed to add. Message: {}".format(
+                result['item']['name'], result['json']['error']['message']))
+    return all_results
+
+
+class FilterModule(object):
+    """ Custom ansible filter mapping """
+
+    # pylint: disable=no-self-use, too-few-public-methods
+    def filters(self):
+        """ returns a mapping of filters to methods """
+        return {
+            "oo_filter_container_providers": oo_filter_container_providers,
+        }
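
A sketch of how this filter is meant to be consumed, assuming provider_results was registered from a looped uri POST against the ManageIQ /api/providers endpoint (the variable name is illustrative): each item carries the json response plus the original item, which is exactly the shape the function indexes into.

    # Hypothetical playbook usage of the filter defined above
    - name: Summarize the outcome of each provider registration
      debug:
        msg: "{{ provider_results.results | oo_filter_container_providers }}"
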
diff --git a/roles/openshift_management/handlers/main.yml b/roles/openshift_management/handlers/main.yml
new file mode 100644
index 000000000..e69de29bb
--- /dev/null
+++ b/roles/openshift_management/handlers/main.yml
diff --git a/roles/openshift_cfme/meta/main.yml b/roles/openshift_management/meta/main.yml
index 162d817f0..07ad51126 100644
--- a/roles/openshift_cfme/meta/main.yml
+++ b/roles/openshift_management/meta/main.yml
@@ -16,4 +16,3 @@ galaxy_info:
dependencies:
- role: lib_openshift
- role: lib_utils
-- role: openshift_master_facts
diff --git a/roles/openshift_management/tasks/accounts.yml b/roles/openshift_management/tasks/accounts.yml
new file mode 100644
index 000000000..e45ea8d43
--- /dev/null
+++ b/roles/openshift_management/tasks/accounts.yml
@@ -0,0 +1,28 @@
+---
+# This role task file is responsible for user/system account creation,
+# and ensuring correct access is provided as required.
+- name: Ensure the CFME system accounts exist
+  oc_serviceaccount:
+    namespace: "{{ openshift_management_project }}"
+    state: present
+    name: "{{ openshift_management_flavor_short }}{{ item.name }}"
+  with_items:
+  - "{{ __openshift_system_account_sccs }}"
+
+- name: Ensure the CFME system accounts have all the required SCCs
+  oc_adm_policy_user:
+    namespace: "{{ openshift_management_project }}"
+    user: "system:serviceaccount:{{ openshift_management_project }}:{{ openshift_management_flavor_short }}{{ item.name }}"
+    resource_kind: scc
+    resource_name: "{{ item.resource_name }}"
+  with_items:
+  - "{{ __openshift_system_account_sccs }}"
+
+- name: Ensure the CFME system accounts have the required roles
+  oc_adm_policy_user:
+    namespace: "{{ openshift_management_project }}"
+    user: "system:serviceaccount:{{ openshift_management_project }}:{{ openshift_management_flavor_short }}{{ item.name }}"
+    resource_kind: role
+    resource_name: "{{ item.resource_name }}"
+  with_items:
+  - "{{ __openshift_management_system_account_roles }}"
diff --git a/roles/openshift_management/tasks/add_container_provider.yml b/roles/openshift_management/tasks/add_container_provider.yml
new file mode 100644
index 000000000..383e6edb5
--- /dev/null
+++ b/roles/openshift_management/tasks/add_container_provider.yml
@@ -0,0 +1,65 @@
+---
+- name: Ensure lib_openshift modules are available
+ include_role:
+ role: lib_openshift
+
+- name: Ensure OpenShift facts module is available
+ include_role:
+ role: openshift_facts
+
+- name: Ensure OpenShift facts are loaded
+ openshift_facts:
+
+- name: Ensure the management SA Secrets are read
+ oc_serviceaccount_secret:
+ state: list
+ service_account: management-admin
+ namespace: management-infra
+ register: sa
+
+- name: Ensure the management SA bearer token is identified
+ set_fact:
+ management_token: "{{ sa.results | oo_filter_sa_secrets }}"
+
+- name: Ensure the SA bearer token value is read
+ oc_secret:
+ state: list
+ name: "{{ management_token }}"
+ namespace: management-infra
+ decode: true
+ no_log: True
+ register: sa_secret
+
+- name: Ensure the SA bearer token value is saved
+ set_fact:
+ management_bearer_token: "{{ sa_secret.results.decoded.token }}"
+
+- name: Ensure we have the public route to the management service
+ oc_route:
+ state: list
+ name: httpd
+ namespace: openshift-management
+ register: route
+
+- name: Ensure the management service route is saved
+ set_fact:
+ management_route: "{{ route.results.0.spec.host }}"
+
+- name: Ensure this cluster is a container provider
+ uri:
+ url: "https://{{ management_route }}/api/providers"
+ body_format: json
+ method: POST
+ user: "{{ openshift_management_username }}"
+ password: "{{ openshift_management_password }}"
+ validate_certs: no
+ # Docs on formatting the BODY of the POST request:
+ # http://manageiq.org/docs/reference/latest/api/reference/providers.html#specifying-connection-configurations
+ body:
+ connection_configurations:
+ - authentication: {auth_key: "{{ management_bearer_token }}", authtype: bearer, type: AuthToken}
+ endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0}
+ hostname: "{{ openshift.master.cluster_public_hostname }}"
+ name: "{{ openshift_management_project }}"
+ port: "{{ openshift.master.api_port }}"
+ type: "ManageIQ::Providers::Openshift::ContainerManager"
diff --git a/roles/openshift_management/tasks/main.yml b/roles/openshift_management/tasks/main.yml
new file mode 100644
index 000000000..9be923a57
--- /dev/null
+++ b/roles/openshift_management/tasks/main.yml
@@ -0,0 +1,96 @@
+---
+######################################################################
+# Users, projects, and privileges
+
+- name: Run pre-install Management validation checks
+ include: validate.yml
+
+# This creates a service account allowing Container Provider
+# integration (managing OCP/Origin via MIQ/Management)
+- name: Enable Container Provider Integration
+ include_role:
+ role: openshift_manageiq
+
+- name: "Ensure the Management '{{ openshift_management_project }}' namespace exists"
+ oc_project:
+ state: present
+ name: "{{ openshift_management_project }}"
+ display_name: "{{ openshift_management_project_description }}"
+
+- name: Create and Authorize Management Accounts
+ include: accounts.yml
+
+######################################################################
+# STORAGE - Initialize basic storage class
+- name: Determine the correct NFS host if required
+ include: storage/nfs_server.yml
+ when: openshift_management_storage_class in ['nfs', 'nfs_external']
+
+#---------------------------------------------------------------------
+# * nfs - set up NFS shares on the first master for a proof of concept
+- name: Create required NFS exports for Management app storage
+ include: storage/nfs.yml
+ when: openshift_management_storage_class == 'nfs'
+
+#---------------------------------------------------------------------
+# * external - NFS again, but pointing to a pre-configured NFS server
+- name: Note Storage Type - External NFS
+ debug:
+ msg: "Setting up external NFS storage, openshift_management_storage_class is {{ openshift_management_storage_class }}"
+ when: openshift_management_storage_class == 'nfs_external'
+
+#---------------------------------------------------------------------
+# * cloudprovider - use an existing cloudprovider based storage
+- name: Note Storage Type - Cloud Provider
+ debug:
+ msg: Validating cloud provider storage type, openshift_management_storage_class is 'cloudprovider'
+ when: openshift_management_storage_class == 'cloudprovider'
+
+#---------------------------------------------------------------------
+# * preconfigured - don't do anything, assume it's all there ready to go
+- name: Note Storage Type - Preconfigured
+ debug:
+ msg: Skipping storage configuration, openshift_management_storage_class is 'preconfigured'
+ when: openshift_management_storage_class == 'preconfigured'
+
+######################################################################
+# APPLICATION TEMPLATE
+- name: Install the Management app and PV templates
+ include: template.yml
+
+######################################################################
+# APP & DB Storage
+
+# For local/external NFS backed installations
+- name: "Create the required App and DB PVs using {{ openshift_management_storage_class }}"
+ include: storage/create_nfs_pvs.yml
+ when:
+ - openshift_management_storage_class in ['nfs', 'nfs_external']
+
+######################################################################
+# CREATE APP
+- name: Note the correct ext-db template name
+ set_fact:
+ openshift_management_template_name: "{{ openshift_management_flavor }}-ext-db"
+ when:
+ - openshift_management_app_template in ['miq-template-ext-db', 'cfme-template-ext-db']
+
+- name: Note the correct podified db template name
+ set_fact:
+ openshift_management_template_name: "{{ openshift_management_flavor }}"
+ when:
+ - openshift_management_app_template in ['miq-template', 'cfme-template']
+
+- name: Ensure the Management App is created
+ oc_process:
+ namespace: "{{ openshift_management_project }}"
+ template_name: "{{ openshift_management_template_name }}"
+ create: True
+ params: "{{ openshift_management_template_parameters }}"
+
+- name: Wait for the app to come up. May take several minutes, 30s check intervals, 10m max
+ command: "oc logs {{ openshift_management_flavor }}-0 -n {{ openshift_management_project }}"
+ register: app_seeding_logs
+ until: app_seeding_logs.stdout.find('Server starting complete') != -1
+ delay: 30
+ retries: 20
diff --git a/roles/openshift_management/tasks/noop.yml b/roles/openshift_management/tasks/noop.yml
new file mode 100644
index 000000000..ed97d539c
--- /dev/null
+++ b/roles/openshift_management/tasks/noop.yml
@@ -0,0 +1 @@
+---
diff --git a/roles/openshift_management/tasks/storage/create_nfs_pvs.yml b/roles/openshift_management/tasks/storage/create_nfs_pvs.yml
new file mode 100644
index 000000000..d1b9a8d5c
--- /dev/null
+++ b/roles/openshift_management/tasks/storage/create_nfs_pvs.yml
@@ -0,0 +1,69 @@
+---
+# Create the required PVs for the App and the DB
+- name: Note the App PV Size from Template Parameters
+ set_fact:
+ openshift_management_app_pv_size: "{{ openshift_management_template_parameters.APPLICATION_VOLUME_CAPACITY }}"
+ when:
+ - openshift_management_template_parameters.APPLICATION_VOLUME_CAPACITY is defined
+
+- name: Note the App PV Size from defaults
+ set_fact:
+ openshift_management_app_pv_size: "{{ __openshift_management_app_pv_size }}"
+ when:
+ - openshift_management_template_parameters.APPLICATION_VOLUME_CAPACITY is not defined
+
+- when: openshift_management_app_template in ['miq-template', 'cfme-template']
+ block:
+ - name: Note the DB PV Size from Template Parameters
+ set_fact:
+ openshift_management_db_pv_size: "{{ openshift_management_template_parameters.DATABASE_VOLUME_CAPACITY }}"
+ when:
+ - openshift_management_template_parameters.DATABASE_VOLUME_CAPACITY is defined
+
+ - name: Note the DB PV Size from defaults
+ set_fact:
+ openshift_management_db_pv_size: "{{ __openshift_management_db_pv_size }}"
+ when:
+ - openshift_management_template_parameters.DATABASE_VOLUME_CAPACITY is not defined
+
+- name: Check if the Management App PV has been created
+ oc_obj:
+ namespace: "{{ openshift_management_project }}"
+ state: list
+ kind: pv
+ name: "{{ openshift_management_flavor_short }}-app"
+ register: miq_app_pv_check
+
+- name: Check if the Management DB PV has been created
+ oc_obj:
+ namespace: "{{ openshift_management_project }}"
+ state: list
+ kind: pv
+ name: "{{ openshift_management_flavor_short }}-db"
+ register: miq_db_pv_check
+ when:
+ - openshift_management_app_template in ['miq-template', 'cfme-template']
+
+- name: Ensure the Management App PV is created
+ oc_process:
+ namespace: "{{ openshift_management_project }}"
+ template_name: "{{ openshift_management_flavor }}-app-pv"
+ create: True
+ params:
+ PV_SIZE: "{{ openshift_management_app_pv_size }}"
+ BASE_PATH: "{{ openshift_management_storage_nfs_base_dir }}"
+ NFS_HOST: "{{ openshift_management_nfs_server }}"
+ when: miq_app_pv_check.results.results == [{}]
+
+- name: Ensure the Management DB PV is created
+ oc_process:
+ namespace: "{{ openshift_management_project }}"
+ template_name: "{{ openshift_management_flavor }}-db-pv"
+ create: True
+ params:
+ PV_SIZE: "{{ openshift_management_db_pv_size }}"
+ BASE_PATH: "{{ openshift_management_storage_nfs_base_dir }}"
+ NFS_HOST: "{{ openshift_management_nfs_server }}"
+ when:
+ - openshift_management_app_template in ['miq-template', 'cfme-template']
+ - miq_db_pv_check.results.results == [{}]
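The `set_fact` pairs at the top of this file encode a simple precedence rule: a capacity supplied in `openshift_management_template_parameters` wins over the role default. Reduced to plain Python (illustrative only, using the defaults from this role's vars file):

```python
# Role defaults (__openshift_management_app_pv_size / _db_pv_size).
defaults = {"APPLICATION_VOLUME_CAPACITY": "5Gi",
            "DATABASE_VOLUME_CAPACITY": "15Gi"}

# User-supplied template parameters; any of them may override a default.
template_parameters = {"APPLICATION_VOLUME_CAPACITY": "10Gi"}

app_pv_size = template_parameters.get("APPLICATION_VOLUME_CAPACITY",
                                      defaults["APPLICATION_VOLUME_CAPACITY"])
db_pv_size = template_parameters.get("DATABASE_VOLUME_CAPACITY",
                                     defaults["DATABASE_VOLUME_CAPACITY"])
# app_pv_size == "10Gi", db_pv_size == "15Gi"
```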
diff --git a/roles/openshift_management/tasks/storage/nfs.yml b/roles/openshift_management/tasks/storage/nfs.yml
new file mode 100644
index 000000000..94e11137c
--- /dev/null
+++ b/roles/openshift_management/tasks/storage/nfs.yml
@@ -0,0 +1,36 @@
+---
+# Tasks to statically provision NFS volumes
+# Include if not using dynamic volume provisioning
+
+- name: Setting up NFS storage
+ block:
+ - name: Include the NFS Setup role tasks
+ include_role:
+ role: openshift_nfs
+ tasks_from: setup
+ vars:
+ l_nfs_base_dir: "{{ openshift_management_storage_nfs_base_dir }}"
+
+ - name: Create the App export
+ include_role:
+ role: openshift_nfs
+ tasks_from: create_export
+ vars:
+ l_nfs_base_dir: "{{ openshift_management_storage_nfs_base_dir }}"
+ l_nfs_export_config: "{{ openshift_management_flavor_short }}"
+ l_nfs_export_name: "{{ openshift_management_flavor_short }}-app"
+ l_nfs_options: "*(rw,no_root_squash,no_wdelay)"
+
+ - name: Create the DB export
+ include_role:
+ role: openshift_nfs
+ tasks_from: create_export
+ vars:
+ l_nfs_base_dir: "{{ openshift_management_storage_nfs_base_dir }}"
+ l_nfs_export_config: "{{ openshift_management_flavor_short }}"
+ l_nfs_export_name: "{{ openshift_management_flavor_short }}-db"
+ l_nfs_options: "*(rw,no_root_squash,no_wdelay)"
+ when:
+ - openshift_management_app_template in ['miq-template', 'cfme-template']
+
+ delegate_to: "{{ openshift_management_nfs_server }}"
diff --git a/roles/openshift_management/tasks/storage/nfs_server.yml b/roles/openshift_management/tasks/storage/nfs_server.yml
new file mode 100644
index 000000000..a1b618137
--- /dev/null
+++ b/roles/openshift_management/tasks/storage/nfs_server.yml
@@ -0,0 +1,45 @@
+---
+- name: Ensure we save the local NFS server if one is provided
+ set_fact:
+ openshift_management_nfs_server: "{{ openshift_management_storage_nfs_local_hostname }}"
+ when:
+ - openshift_management_storage_nfs_local_hostname is defined
+ - openshift_management_storage_nfs_local_hostname != False
+ - openshift_management_storage_class == "nfs"
+
+- name: Ensure we save the local NFS server
+ set_fact:
+ openshift_management_nfs_server: "{{ groups['oo_nfs_to_config'].0 }}"
+ when:
+ - openshift_management_nfs_server is not defined
+ - openshift_management_storage_class == "nfs"
+
+- name: Ensure we save the external NFS server
+ set_fact:
+ openshift_management_nfs_server: "{{ openshift_management_storage_nfs_external_hostname }}"
+ when:
+ - openshift_management_storage_class == "nfs_external"
+
+- name: Failed External NFS server detection
+ assert:
+ that:
+ - openshift_management_nfs_server is defined
+ msg: |
+ Unable to detect an NFS server. The 'nfs_external'
+ openshift_management_storage_class option requires that you
+ manually set the
+ openshift_management_storage_nfs_external_hostname parameter.
+ when:
+ - openshift_management_storage_class == 'nfs_external'
+
+- name: Failed Local NFS server detection
+ assert:
+ that:
+ - openshift_management_nfs_server is defined
+ msg: |
+ Unable to detect an NFS server. The 'nfs'
+ openshift_management_storage_class option requires that you have
+ an 'nfs' inventory group or manually set the
+ openshift_management_storage_nfs_local_hostname parameter.
+ when:
+ - openshift_management_storage_class == 'nfs'
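The tasks above resolve the NFS server in a fixed order: an explicitly provided local hostname, then the first host of the `oo_nfs_to_config` group, then (for `nfs_external`) the external hostname, failing the asserts if nothing matched. A compact sketch of that resolution order, under those assumptions:

```python
def resolve_nfs_server(storage_class, local_hostname=None,
                       nfs_group=(), external_hostname=None):
    """Mirror the set_fact/assert chain above (illustrative only)."""
    if storage_class == "nfs":
        server = local_hostname or (nfs_group[0] if nfs_group else None)
        if server is None:
            raise AssertionError(
                "need an 'nfs' inventory group or "
                "openshift_management_storage_nfs_local_hostname")
        return server
    if storage_class == "nfs_external":
        if external_hostname is None:
            raise AssertionError(
                "need openshift_management_storage_nfs_external_hostname")
        return external_hostname
    return None  # other storage classes do not use this file
```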
diff --git a/roles/openshift_management/tasks/storage/storage.yml b/roles/openshift_management/tasks/storage/storage.yml
new file mode 100644
index 000000000..d8bf7aa3e
--- /dev/null
+++ b/roles/openshift_management/tasks/storage/storage.yml
@@ -0,0 +1,3 @@
+---
+- include: nfs.yml
+ when: not (openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce'))
diff --git a/roles/openshift_management/tasks/template.yml b/roles/openshift_management/tasks/template.yml
new file mode 100644
index 000000000..9f97cdcb9
--- /dev/null
+++ b/roles/openshift_management/tasks/template.yml
@@ -0,0 +1,128 @@
+---
+# Tasks for ensuring the correct CFME templates are landed on the remote system
+
+######################################################################
+# CFME App Template
+#
+# Note, this is different from the create_nfs_pvs.yml tasks in that
+# the application template does not require any jinja2 evaluation.
+#
+# TODO: Handle the case where the server or PV templates are updated
+# in openshift-ansible and the change needs to be landed on the
+# managed cluster.
+
+######################################################################
+# STANDARD PODIFIED DATABASE TEMPLATE
+- when: openshift_management_app_template in ['miq-template', 'cfme-template']
+ block:
+ - name: Check if the Management Server template has been created already
+ oc_obj:
+ namespace: "{{ openshift_management_project }}"
+ state: list
+ kind: template
+ name: "{{ openshift_management_flavor }}"
+ register: miq_server_check
+
+ - when: miq_server_check.results.results == [{}]
+ block:
+ - name: Copy over Management Server template
+ copy:
+ src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-template.yaml"
+ dest: "{{ template_dir }}/"
+
+ - name: Ensure Management Server Template is created
+ oc_obj:
+ namespace: "{{ openshift_management_project }}"
+ name: "{{ openshift_management_flavor }}"
+ state: present
+ kind: template
+ files:
+ - "{{ template_dir }}/{{ openshift_management_flavor_short }}-template.yaml"
+
+######################################################################
+# EXTERNAL DATABASE TEMPLATE
+- when: openshift_management_app_template in ['miq-template-ext-db', 'cfme-template-ext-db']
+ block:
+ - name: Check if the Management Ext-DB Server template has been created already
+ oc_obj:
+ namespace: "{{ openshift_management_project }}"
+ state: list
+ kind: template
+ name: "{{ openshift_management_flavor }}-ext-db"
+ register: miq_ext_db_server_check
+
+ - when: miq_ext_db_server_check.results.results == [{}]
+ block:
+ - name: Copy over Management Ext-DB Server template
+ copy:
+ src: "templates/{{ openshift_management_flavor }}/{{openshift_management_flavor_short}}-template-ext-db.yaml"
+ dest: "{{ template_dir }}/"
+
+ - name: Ensure Management Ext-DB Server Template is created
+ oc_obj:
+ namespace: "{{ openshift_management_project }}"
+ name: "{{ openshift_management_flavor }}-ext-db"
+ state: present
+ kind: template
+ files:
+ - "{{ template_dir }}/{{ openshift_management_flavor_short }}-template-ext-db.yaml"
+
+# End app template creation.
+######################################################################
+
+######################################################################
+# Begin conditional PV template creations
+
+# Required for the application server
+- name: Check if the Management App PV template has been created already
+ oc_obj:
+ namespace: "{{ openshift_management_project }}"
+ state: list
+ kind: template
+ name: "{{ openshift_management_flavor }}-app-pv"
+ register: miq_app_pv_check
+
+- when: miq_app_pv_check.results.results == [{}]
+ block:
+ - name: Copy over Management App PV template
+ copy:
+ src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-pv-server-example.yaml"
+ dest: "{{ template_dir }}/"
+
+ - name: Ensure Management App PV Template is created
+ oc_obj:
+ namespace: "{{ openshift_management_project }}"
+ name: "{{ openshift_management_flavor }}-app-pv"
+ state: present
+ kind: template
+ files:
+ - "{{ template_dir }}/{{ openshift_management_flavor_short }}-pv-server-example.yaml"
+
+#---------------------------------------------------------------------
+
+# Required for database if the installation is fully podified
+- when: openshift_management_app_template in ['miq-template', 'cfme-template']
+ block:
+ - name: Check if the Management DB PV template has been created already
+ oc_obj:
+ namespace: "{{ openshift_management_project }}"
+ state: list
+ kind: template
+ name: "{{ openshift_management_flavor }}-db-pv"
+ register: miq_db_pv_check
+
+ - when: miq_db_pv_check.results.results == [{}]
+ block:
+ - name: Copy over Management DB PV template
+ copy:
+ src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-pv-db-example.yaml"
+ dest: "{{ template_dir }}/"
+
+ - name: Ensure Management DB PV Template is created
+ oc_obj:
+ namespace: "{{ openshift_management_project }}"
+ name: "{{ openshift_management_flavor }}-db-pv"
+ state: present
+ kind: template
+ files:
+ - "{{ template_dir }}/{{ openshift_management_flavor_short }}-pv-db-example.yaml"
diff --git a/roles/openshift_management/tasks/uninstall.yml b/roles/openshift_management/tasks/uninstall.yml
new file mode 100644
index 000000000..09fbc609f
--- /dev/null
+++ b/roles/openshift_management/tasks/uninstall.yml
@@ -0,0 +1,23 @@
+---
+- name: Start removing all the objects
+ command: "oc delete -n {{ openshift_management_project }} {{ item }} --all"
+ with_items:
+ - rc
+ - dc
+ - po
+ - svc
+ - pv
+ - pvc
+ - statefulsets
+ - routes
+
+- name: Remove the project
+ command: "oc delete -n {{ openshift_management_project }} project {{ openshift_management_project }}"
+
+- name: Verify project has been destroyed
+ command: "oc get project {{ openshift_management_project }}"
+ ignore_errors: True
+ register: project_terminated
+ until: project_terminated.stderr.find("NotFound") != -1
+ delay: 5
+ retries: 30
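The final task polls `oc get project` until its stderr reports NotFound, with 30 retries at 5-second intervals (about 2.5 minutes). An equivalent standalone sketch:

```python
import subprocess
import time

def wait_for_project_gone(project, retries=30, delay=5):
    """Poll until the project lookup fails with NotFound (illustrative)."""
    for _ in range(retries):
        proc = subprocess.run(["oc", "get", "project", project],
                              capture_output=True, text=True)
        if "NotFound" in proc.stderr:
            return True
        time.sleep(delay)
    return False
```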
diff --git a/roles/openshift_management/tasks/validate.yml b/roles/openshift_management/tasks/validate.yml
new file mode 100644
index 000000000..8b20bdc5e
--- /dev/null
+++ b/roles/openshift_management/tasks/validate.yml
@@ -0,0 +1,90 @@
+---
+# Validate configuration parameters passed to the openshift_management role
+
+######################################################################
+# CORE PARAMETERS
+- name: Ensure openshift_management_app_template is valid
+ assert:
+ that:
+ - openshift_management_app_template in __openshift_management_app_templates
+
+ msg: |
+ "openshift_management_app_template must be one of {{
+ __openshift_management_app_templates | join(', ') }}"
+
+- name: Ensure openshift_management_storage_class is a valid type
+ assert:
+ that:
+ - openshift_management_storage_class in __openshift_management_storage_classes
+ msg: |
+ "openshift_management_storage_class must be one of {{
+ __openshift_management_storage_classes | join(', ') }}"
+
+######################################################################
+# STORAGE PARAMS - NFS
+- name: Ensure external NFS storage has a valid NFS server hostname defined
+ assert:
+ that:
+ - openshift_management_storage_nfs_external_hostname | default(False)
+ msg: |
+ The selected storage class 'nfs_external' requires a valid
+ hostname for the openshift_management_storage_nfs_external_hostname parameter
+ when:
+ - openshift_management_storage_class == 'nfs_external'
+
+- name: Ensure local NFS storage has a valid NFS server to use
+ fail:
+ msg: |
+ No NFS hosts detected or defined but storage class is set to
+ 'nfs'. Add hosts to your [nfs] group or define one manually with
+ the 'openshift_management_storage_nfs_local_hostname' parameter
+ when:
+ - openshift_management_storage_class == 'nfs'
+ # You haven't created any NFS groups
+ - (groups.nfs is defined and groups.nfs | length == 0) or (groups.nfs is not defined)
+ # You did not manually specify a host to use
+ - (openshift_management_storage_nfs_local_hostname is not defined) or (openshift_management_storage_nfs_local_hostname == false)
+
+######################################################################
+# STORAGE PARAMS - CLOUD PROVIDER
+- name: Validate Cloud Provider storage class
+ assert:
+ that:
+ - openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce'
+ msg: |
+ openshift_management_storage_class is 'cloudprovider' but you have an
+ invalid kind defined, '{{ openshift_cloudprovider_kind }}'. See
+ 'openshift_cloudprovider_kind' in the example inventories for
+ the required parameters for your selected cloud
+ provider. Working providers: 'aws' and 'gce'.
+ when:
+ - openshift_management_storage_class == 'cloudprovider'
+ - openshift_cloudprovider_kind is defined
+
+- name: Validate 'cloudprovider' Storage Class has required parameters defined
+ assert:
+ that:
+ - openshift_cloudprovider_kind is defined
+ msg: |
+ openshift_management_storage_class is 'cloudprovider' but you do not
+ have 'openshift_cloudprovider_kind' defined, this is
+ required. Search the example inventories for
+ 'openshift_cloudprovider_kind'. The required parameters for your
+ selected cloud provider must be defined in your inventory as
+ well. Working providers: 'aws' and 'gce'.
+ when:
+ - openshift_management_storage_class == 'cloudprovider'
+
+######################################################################
+# DATABASE CONNECTION VALIDATION
+- name: Validate all required database parameters were provided for ext-db template
+ assert:
+ that:
+ - item in openshift_management_template_parameters
+ msg: |
+ "You are using external database services but a required
+ database parameter {{ item }} was not found in
+ 'openshift_management_template_parameters'"
+ with_items: "{{ __openshift_management_required_db_conn_params }}"
+ when:
+ - openshift_management_app_template in ['miq-template-ext-db', 'cfme-template-ext-db']
diff --git a/roles/openshift_management/vars/main.yml b/roles/openshift_management/vars/main.yml
new file mode 100644
index 000000000..da3ad0af7
--- /dev/null
+++ b/roles/openshift_management/vars/main.yml
@@ -0,0 +1,76 @@
+---
+# Misc enumerated values
+#---------------------------------------------------------------------
+# Allowed choices for the storage class parameter
+__openshift_management_storage_classes:
+ - nfs
+ - nfs_external
+ - preconfigured
+ - cloudprovider
+
+#---------------------------------------------------------------------
+# DEFAULT PV SIZES
+# How large to make the MIQ application PV
+__openshift_management_app_pv_size: 5Gi
+# How large to make the MIQ PostgreSQL PV
+__openshift_management_db_pv_size: 15Gi
+
+# Name of the application templates with object/parameter definitions
+__openshift_management_app_templates:
+ - miq-template-ext-db
+ - miq-template
+ - cfme-template-ext-db
+ - cfme-template
+
+# PostgreSQL database connection parameters
+__openshift_management_db_parameters:
+ - DATABASE_USER
+ - DATABASE_PASSWORD
+ - DATABASE_IP
+ - DATABASE_PORT
+ - DATABASE_NAME
+
+# # Commented out until we can support both CFME and MIQ
+# # openshift_management_flavor: "{{ 'cloudforms' if openshift_deployment_type == 'openshift-enterprise' else 'manageiq' }}"
+#openshift_management_flavor: cloudforms
+openshift_management_flavor: manageiq
+# TODO: Make this conditional as well based on the prior variable
+# # openshift_management_flavor_short: "{{ 'cfme' if openshift_deployment_type == 'openshift-enterprise' else 'miq' }}"
+# openshift_management_flavor_short: cfme
+openshift_management_flavor_short: miq
+
+######################################################################
+# ACCOUNTING
+######################################################################
+# Service Account SCCs
+__openshift_system_account_sccs:
+ - name: -anyuid
+ resource_name: anyuid
+ - name: -orchestrator
+ resource_name: anyuid
+ - name: -privileged
+ resource_name: privileged
+ - name: -httpd
+ resource_name: anyuid
+
+# Service Account Roles
+__openshift_management_system_account_roles:
+ - name: -orchestrator
+ resource_name: view
+ - name: -orchestrator
+ resource_name: edit
+
+######################################################################
+# DEFAULTS
+######################################################################
+# The user only has to provide the parameters they need to override; we
+# do a hash update with the provided user parameters to create the
+# final connection structure.
+#
+# TODO: Update user provided configs with this if they are missing fields
+__openshift_management_required_db_conn_params:
+ - DATABASE_USER
+ - DATABASE_PASSWORD
+ - DATABASE_IP
+ - DATABASE_PORT
+ - DATABASE_NAME
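The TODO above describes the intended merge: start from a complete set of connection parameters and update that hash with whatever the user supplied, so user values win. In Python terms (the default values here are hypothetical, chosen only to illustrate the merge; the role currently only lists the required keys):

```python
# Hypothetical defaults for illustration only.
default_db_conn_params = {
    "DATABASE_USER": "<user>",
    "DATABASE_PASSWORD": "<password>",
    "DATABASE_IP": "<ip>",
    "DATABASE_PORT": "5432",
    "DATABASE_NAME": "<name>",
}

user_params = {"DATABASE_IP": "10.0.0.5", "DATABASE_PASSWORD": "s3cret"}

# "Hash update": user-provided values override the defaults.
final_params = {**default_db_conn_params, **user_params}
```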
diff --git a/roles/openshift_master/defaults/main.yml b/roles/openshift_master/defaults/main.yml
index 73e935d3f..fe78dea66 100644
--- a/roles/openshift_master/defaults/main.yml
+++ b/roles/openshift_master/defaults/main.yml
@@ -1,4 +1,9 @@
---
+# openshift_master_defaults_in_use is a workaround to detect if we are consuming
+# the plays from the role or outside of the role.
+openshift_master_defaults_in_use: True
+openshift_master_debug_level: "{{ debug_level | default(2) }}"
+
r_openshift_master_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
r_openshift_master_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
@@ -7,7 +12,7 @@ r_openshift_master_clean_install: false
r_openshift_master_etcd3_storage: false
r_openshift_master_os_firewall_enable: true
r_openshift_master_os_firewall_deny: []
-r_openshift_master_os_firewall_allow:
+default_r_openshift_master_os_firewall_allow:
- service: api server https
port: "{{ openshift.master.api_port }}/tcp"
- service: api controllers https
@@ -19,6 +24,8 @@ r_openshift_master_os_firewall_allow:
- service: etcd embedded
port: 4001/tcp
cond: "{{ groups.oo_etcd_to_config | default([]) | length == 0 }}"
+r_openshift_master_os_firewall_allow: "{{ default_r_openshift_master_os_firewall_allow | union(openshift_master_open_ports | default([])) }}"
+
# oreg_url is defined by user input
oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
@@ -26,6 +33,9 @@ oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker"
oreg_auth_credentials_replace: False
l_bind_docker_reg_auth: False
+containerized_svc_dir: "/usr/lib/systemd/system"
+ha_svc_template_path: "native-cluster"
+
# NOTE
# r_openshift_master_*_default may be defined external to this role.
# openshift_use_*, if defined, may affect other roles or play behavior.
@@ -38,8 +48,99 @@ r_openshift_master_use_nuage: "{{ r_openshift_master_use_nuage_default }}"
r_openshift_master_use_contiv_default: "{{ openshift_use_contiv | default(False) }}"
r_openshift_master_use_contiv: "{{ r_openshift_master_use_contiv_default }}"
+r_openshift_master_use_kuryr_default: "{{ openshift_use_kuryr | default(False) }}"
+r_openshift_master_use_kuryr: "{{ r_openshift_master_use_kuryr_default }}"
+
r_openshift_master_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}"
r_openshift_master_data_dir: "{{ r_openshift_master_data_dir_default }}"
r_openshift_master_sdn_network_plugin_name_default: "{{ os_sdn_network_plugin_name | default('redhat/openshift-ovs-subnet') }}"
r_openshift_master_sdn_network_plugin_name: "{{ r_openshift_master_sdn_network_plugin_name_default }}"
+
+openshift_master_image_config_latest_default: "{{ openshift_image_config_latest | default(False) }}"
+openshift_master_image_config_latest: "{{ openshift_master_image_config_latest_default }}"
+
+openshift_master_config_dir_default: "{{ (openshift.common.config_base | default('/etc/origin/master')) ~ '/master' }}"
+openshift_master_config_dir: "{{ openshift_master_config_dir_default }}"
+openshift_master_cloud_provider: "{{ openshift_cloudprovider_kind | default('aws') }}"
+
+openshift_master_node_config_networkconfig_mtu: "{{ openshift_node_sdn_mtu | default(1450) }}"
+
+openshift_master_node_config_kubeletargs_cpu: 500m
+openshift_master_node_config_kubeletargs_mem: 512M
+
+openshift_master_bootstrap_enabled: False
+
+openshift_master_client_binary: "{{ openshift.common.client_binary if openshift is defined else 'oc' }}"
+
+openshift_master_config_imageconfig_format: "{{ openshift.node.registry_url }}"
+
+# these are for the default settings in a generated node-config.yaml
+openshift_master_node_config_default_edits:
+- key: nodeName
+ state: absent
+- key: dnsBindAddress
+ value: 127.0.0.1:53
+- key: dnsDomain
+ value: cluster.local
+- key: dnsRecursiveResolvConf
+ value: /etc/origin/node/resolv.conf
+- key: imageConfig.format
+ value: "{{ openshift_master_config_imageconfig_format }}"
+- key: kubeletArguments.cloud-config
+ value:
+ - "/etc/origin/cloudprovider/{{ openshift_master_cloud_provider }}.conf"
+- key: kubeletArguments.cloud-provider
+ value:
+ - "{{ openshift_master_cloud_provider }}"
+- key: kubeletArguments.kube-reserved
+ value:
+ - "cpu={{ openshift_master_node_config_kubeletargs_cpu }},memory={{ openshift_master_node_config_kubeletargs_mem }}"
+- key: kubeletArguments.system-reserved
+ value:
+ - "cpu={{ openshift_master_node_config_kubeletargs_cpu }},memory={{ openshift_master_node_config_kubeletargs_mem }}"
+- key: enable-controller-attach-detach
+ value:
+ - 'true'
+- key: networkConfig.mtu
+ value: "{{ openshift_master_node_config_networkconfig_mtu }}"
+- key: networkConfig.networkPluginName
+ value: "{{ r_openshift_master_sdn_network_plugin_name }}"
+- key: networkPluginName
+ value: "{{ r_openshift_master_sdn_network_plugin_name }}"
+
+
+# Labels applied to all nodes
+openshift_master_node_config_kubeletargs_default_labels: []
+# Per-node-group label overrides
+openshift_master_node_config_kubeletargs_master_labels: []
+openshift_master_node_config_kubeletargs_infra_labels: []
+openshift_master_node_config_kubeletargs_compute_labels: []
+
+openshift_master_node_config_master:
+ type: master
+ edits:
+ - key: kubeletArguments.node-labels
+ value: "{{ openshift_master_node_config_kubeletargs_default_labels |
+ union(openshift_master_node_config_kubeletargs_master_labels) |
+ union(['type=master']) }}"
+openshift_master_node_config_infra:
+ type: infra
+ edits:
+ - key: kubeletArguments.node-labels
+ value: "{{ openshift_master_node_config_kubeletargs_default_labels |
+ union(openshift_master_node_config_kubeletargs_infra_labels) |
+ union(['type=infra']) }}"
+openshift_master_node_config_compute:
+ type: compute
+ edits:
+ - key: kubeletArguments.node-labels
+ value: "{{ openshift_master_node_config_kubeletargs_default_labels |
+ union(openshift_master_node_config_kubeletargs_compute_labels) |
+ union(['type=compute']) }}"
+
+openshift_master_node_configs:
+- "{{ openshift_master_node_config_infra }}"
+- "{{ openshift_master_node_config_compute }}"
+
+openshift_master_bootstrap_namespace: openshift-node
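With the firewall change above, the rules become extensible: the shipped rules live in `default_r_openshift_master_os_firewall_allow`, and any user-supplied `openshift_master_open_ports` entries are unioned in. Jinja2's `union` filter behaves like a duplicate-free list merge, roughly:

```python
def union(a, b):
    """Duplicate-free list merge, approximating Jinja2's union filter."""
    merged = list(a)
    for item in b:
        if item not in merged:
            merged.append(item)
    return merged

defaults = [{"service": "api server https", "port": "8443/tcp"}]
open_ports = [{"service": "prometheus", "port": "9090/tcp"}]
print(union(defaults, open_ports))  # both rules, no duplicates
```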
diff --git a/roles/openshift_master/meta/main.yml b/roles/openshift_master/meta/main.yml
index a657668a9..a1cda2ad4 100644
--- a/roles/openshift_master/meta/main.yml
+++ b/roles/openshift_master/meta/main.yml
@@ -13,4 +13,5 @@ galaxy_info:
- cloud
dependencies:
- role: lib_openshift
+- role: lib_utils
- role: lib_os_firewall
diff --git a/roles/openshift_master/tasks/bootstrap.yml b/roles/openshift_master/tasks/bootstrap.yml
index 0013f5289..f837a8bae 100644
--- a/roles/openshift_master/tasks/bootstrap.yml
+++ b/roles/openshift_master/tasks/bootstrap.yml
@@ -1,28 +1,78 @@
---
-
-- name: ensure the node-bootstrap service account exists
- oc_serviceaccount:
- name: node-bootstrapper
- namespace: openshift-infra
- state: present
- run_once: true
-
-- name: grant node-bootstrapper the correct permissions to bootstrap
- oc_adm_policy_user:
- namespace: openshift-infra
- user: system:serviceaccount:openshift-infra:node-bootstrapper
- resource_kind: cluster-role
- resource_name: system:node-bootstrapper
- state: present
- run_once: true
-
# TODO: create a module for this command.
# oc_serviceaccounts_kubeconfig
- name: create service account kubeconfig with csr rights
command: "oc serviceaccounts create-kubeconfig node-bootstrapper -n openshift-infra"
register: kubeconfig_out
+ until: kubeconfig_out.rc == 0
+ retries: 24
+ delay: 5
- name: put service account kubeconfig into a file on disk for bootstrap
copy:
content: "{{ kubeconfig_out.stdout }}"
dest: "{{ openshift_master_config_dir }}/bootstrap.kubeconfig"
+
+- name: create a temp dir for this work
+ command: mktemp -d /tmp/openshift_node_config-XXXXXX
+ register: mktempout
+ run_once: true
+
+# This generation step exists so that we do not have to maintain
+# our own copy of the template. The template is generated by
+# the product, and the following settings will be
+# generated by the master
+- name: generate a node-config dynamically
+ command: >
+ {{ openshift_master_client_binary }} adm create-node-config
+ --node-dir={{ mktempout.stdout }}/
+ --node=CONFIGMAP
+ --hostnames=test
+ --dns-ip=0.0.0.0
+ --certificate-authority={{ openshift_master_config_dir }}/ca.crt
+ --signer-cert={{ openshift_master_config_dir }}/ca.crt
+ --signer-key={{ openshift_master_config_dir }}/ca.key
+ --signer-serial={{ openshift_master_config_dir }}/ca.serial.txt
+ --node-client-certificate-authority={{ openshift_master_config_dir }}/ca.crt
+ register: configgen
+ run_once: true
+
+- name: remove the default settings
+ yedit:
+ state: "{{ item.state | default('present') }}"
+ src: "{{ mktempout.stdout }}/node-config.yaml"
+ key: "{{ item.key }}"
+ value: "{{ item.value | default(omit) }}"
+ with_items: "{{ openshift_master_node_config_default_edits }}"
+ run_once: true
+
+- name: copy the generated config into each group
+ copy:
+ src: "{{ mktempout.stdout }}/node-config.yaml"
+ remote_src: true
+ dest: "{{ mktempout.stdout }}/node-config-{{ item.type }}.yaml"
+ with_items: "{{ openshift_master_node_configs }}"
+ run_once: true
+
+- name: "specialize the generated configs for node-config-{{ item.type }}"
+ yedit:
+ src: "{{ mktempout.stdout }}/node-config-{{ item.type }}.yaml"
+ edits: "{{ item.edits }}"
+ with_items: "{{ openshift_master_node_configs }}"
+ run_once: true
+
+- name: create node-config.yaml configmap
+ oc_configmap:
+ name: "node-config-{{ item.type }}"
+ namespace: "{{ openshift_master_bootstrap_namespace }}"
+ from_file:
+ node-config.yaml: "{{ mktempout.stdout }}/node-config-{{ item.type }}.yaml"
+ with_items: "{{ openshift_master_node_configs }}"
+ run_once: true
+
+- name: remove templated files
+ file:
+ dest: "{{ mktempout.stdout }}/"
+ state: absent
+ with_items: "{{ openshift_master_node_configs }}"
+ run_once: true
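The `yedit` tasks above apply the `openshift_master_node_config_default_edits` list to the generated node-config.yaml: entries with `state: absent` remove a key, all others set it, and dotted keys address nested maps. A rough PyYAML equivalent of that behaviour:

```python
import yaml

def apply_edits(path, edits):
    """Approximate the yedit set/remove semantics used above (sketch)."""
    with open(path) as f:
        doc = yaml.safe_load(f)
    for edit in edits:
        keys = edit["key"].split(".")
        node = doc
        for k in keys[:-1]:
            node = node.setdefault(k, {})
        if edit.get("state") == "absent":
            node.pop(keys[-1], None)  # remove the key if present
        else:
            node[keys[-1]] = edit["value"]
    with open(path, "w") as f:
        yaml.safe_dump(doc, f)

apply_edits("node-config.yaml",
            [{"key": "nodeName", "state": "absent"},
             {"key": "networkConfig.mtu", "value": 1450}])
```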
diff --git a/roles/openshift_master/tasks/check_master_api_is_ready.yml b/roles/openshift_master/tasks/check_master_api_is_ready.yml
new file mode 100644
index 000000000..7e8a7a596
--- /dev/null
+++ b/roles/openshift_master/tasks/check_master_api_is_ready.yml
@@ -0,0 +1,14 @@
+---
+- name: Wait for API to become available
+ # Using curl here since the uri module requires python-httplib2 and
+ # wait_for port doesn't provide health information.
+ command: >
+ curl --silent --tlsv1.2
+ --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
+ {{ openshift.master.api_url }}/healthz/ready
+ register: l_api_available_output
+ until: l_api_available_output.stdout == 'ok'
+ retries: 120
+ delay: 1
+ run_once: true
+ changed_when: false
diff --git a/roles/openshift_master/tasks/clean_systemd_units.yml b/roles/openshift_master/tasks/clean_systemd_units.yml
deleted file mode 100644
index e641f84d4..000000000
--- a/roles/openshift_master/tasks/clean_systemd_units.yml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-
-- name: Disable master service
- systemd:
- name: "{{ openshift.common.service_type }}-master"
- state: stopped
- enabled: no
- masked: yes
- ignore_errors: true
diff --git a/roles/openshift_master/tasks/configure_external_etcd.yml b/roles/openshift_master/tasks/configure_external_etcd.yml
new file mode 100644
index 000000000..b0590ac84
--- /dev/null
+++ b/roles/openshift_master/tasks/configure_external_etcd.yml
@@ -0,0 +1,17 @@
+---
+- name: Remove etcdConfig section
+ yedit:
+ src: /etc/origin/master/master-config.yaml
+ key: "etcdConfig"
+ state: absent
+- name: Set etcdClientInfo.ca to master.etcd-ca.crt
+ yedit:
+ src: /etc/origin/master/master-config.yaml
+ key: etcdClientInfo.ca
+ value: master.etcd-ca.crt
+- name: Set etcdClientInfo.urls to the external etcd
+ yedit:
+ src: /etc/origin/master/master-config.yaml
+ key: etcdClientInfo.urls
+ value:
+ - "{{ etcd_peer_url_scheme }}://{{ etcd_ip }}:{{ etcd_peer_port }}"
diff --git a/roles/openshift_master/tasks/journald.yml b/roles/openshift_master/tasks/journald.yml
new file mode 100644
index 000000000..a16cbe78e
--- /dev/null
+++ b/roles/openshift_master/tasks/journald.yml
@@ -0,0 +1,29 @@
+---
+- name: Checking for journald.conf
+ stat: path=/etc/systemd/journald.conf
+ register: journald_conf_file
+
+- name: Create journald persistence directories
+ file:
+ path: /var/log/journal
+ state: directory
+
+- name: Update journald setup
+ replace:
+ dest: /etc/systemd/journald.conf
+ regexp: '^(\#| )?{{ item.var }}=\s*.*?$'
+ replace: ' {{ item.var }}={{ item.val }}'
+ backup: yes
+ with_items: "{{ journald_vars_to_replace | default([]) }}"
+ when: journald_conf_file.stat.exists
+ register: journald_update
+
+# We need to restart journald immediately, otherwise it gets in the way of
+# further steps in ansible
+- name: Restart journald
+ command: "systemctl restart systemd-journald"
+ retries: 3
+ delay: 5
+ register: result
+ until: result.rc == 0
+ when: journald_update | changed
diff --git a/roles/openshift_master/tasks/main.yml b/roles/openshift_master/tasks/main.yml
index 82b4b420c..48b34c578 100644
--- a/roles/openshift_master/tasks/main.yml
+++ b/roles/openshift_master/tasks/main.yml
@@ -177,31 +177,12 @@
local_facts:
no_proxy_etcd_host_ips: "{{ openshift_no_proxy_etcd_host_ips }}"
+- name: Update journald config
+ include: journald.yml
+
- name: Install the systemd units
include: systemd_units.yml
-- name: Checking for journald.conf
- stat: path=/etc/systemd/journald.conf
- register: journald_conf_file
-
-- name: Update journald setup
- replace:
- dest: /etc/systemd/journald.conf
- regexp: '^(\#| )?{{ item.var }}=\s*.*?$'
- replace: ' {{ item.var }}={{ item.val }}'
- backup: yes
- with_items: "{{ journald_vars_to_replace | default([]) }}"
- when: journald_conf_file.stat.exists
- register: journald_update
-
-# I need to restart journald immediatelly, otherwise it gets into way during
-# further steps in ansible
-- name: Restart journald
- systemd:
- name: systemd-journald
- state: restarted
- when: journald_update | changed
-
- name: Install Master system container
include: system_container.yml
when:
@@ -311,59 +292,18 @@
# A separate wait is required here for native HA since notifies will
# be resolved after all tasks in the role.
-- name: Wait for API to become available
- # Using curl here since the uri module requires python-httplib2 and
- # wait_for port doesn't provide health information.
- command: >
- curl --silent --tlsv1.2
- {% if openshift.common.version_gte_3_2_or_1_2 | bool %}
- --cacert {{ openshift.common.config_base }}/master/ca-bundle.crt
- {% else %}
- --cacert {{ openshift.common.config_base }}/master/ca.crt
- {% endif %}
- {{ openshift.master.api_url }}/healthz/ready
- register: l_api_available_output
- until: l_api_available_output.stdout == 'ok'
- retries: 120
- delay: 1
- run_once: true
- changed_when: false
+- include: check_master_api_is_ready.yml
when:
- openshift.master.cluster_method == 'native'
- master_api_service_status_changed | bool
-- name: Start and enable master controller on first master
- systemd:
- name: "{{ openshift.common.service_type }}-master-controllers"
- enabled: yes
- state: started
- when:
- - openshift.master.cluster_method == 'native'
- - inventory_hostname == openshift_master_hosts[0]
- register: l_start_result
- until: not l_start_result | failed
- retries: 1
- delay: 60
-
-- name: Dump logs from master-controllers if it failed
- command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-master-controllers
- when:
- - l_start_result | failed
-
-- name: Wait for master controller service to start on first master
- pause:
- seconds: 15
- when:
- - openshift.master.cluster_method == 'native'
-
-- name: Start and enable master controller on all masters
+- name: Start and enable master controller service
systemd:
name: "{{ openshift.common.service_type }}-master-controllers"
enabled: yes
state: started
when:
- openshift.master.cluster_method == 'native'
- - inventory_hostname != openshift_master_hosts[0]
register: l_start_result
until: not l_start_result | failed
retries: 1
@@ -374,7 +314,8 @@
when:
- l_start_result | failed
-- set_fact:
+- name: Set fact master_controllers_service_status_changed
+ set_fact:
master_controllers_service_status_changed: "{{ l_start_result | changed }}"
when:
- openshift.master.cluster_method == 'native'
diff --git a/roles/openshift_master/tasks/registry_auth.yml b/roles/openshift_master/tasks/registry_auth.yml
index 2644f235e..cde01c49e 100644
--- a/roles/openshift_master/tasks/registry_auth.yml
+++ b/roles/openshift_master/tasks/registry_auth.yml
@@ -1,14 +1,4 @@
---
-# We need to setup some variables as this play might be called directly
-# from outside of the role.
-- set_fact:
- oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker"
- when: oreg_auth_credentials_path is not defined
-
-- set_fact:
- oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
- when: oreg_host is not defined
-
- name: Check for credentials file for registry auth
stat:
path: "{{ oreg_auth_credentials_path }}"
@@ -21,6 +11,9 @@
- oreg_auth_user is defined
- (not master_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
register: master_oreg_auth_credentials_create
+ retries: 3
+ delay: 5
+ until: master_oreg_auth_credentials_create.rc == 0
notify:
- restart master api
- restart master controllers
diff --git a/roles/openshift_master/tasks/system_container.yml b/roles/openshift_master/tasks/system_container.yml
index 91332acfb..843352532 100644
--- a/roles/openshift_master/tasks/system_container.yml
+++ b/roles/openshift_master/tasks/system_container.yml
@@ -1,4 +1,9 @@
---
+- name: Ensure proxies are in the atomic.conf
+ include_role:
+ name: openshift_atomic
+ tasks_from: proxy
+
- name: Pre-pull master system container image
command: >
atomic pull --storage=ostree {{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.master.master_system_image }}:{{ openshift_image_tag }}
diff --git a/roles/openshift_master/tasks/systemd_units.yml b/roles/openshift_master/tasks/systemd_units.yml
index 8de62c59a..5751723ab 100644
--- a/roles/openshift_master/tasks/systemd_units.yml
+++ b/roles/openshift_master/tasks/systemd_units.yml
@@ -1,31 +1,9 @@
---
-# This file is included both in the openshift_master role and in the upgrade
-# playbooks. For that reason the ha_svc variables are use set_fact instead of
-# the vars directory on the role.
+# systemd_units.yml is included both in the openshift_master role and in the upgrade
+# playbooks.
-# This play may be consumed outside the role, we need to ensure that
-# openshift_master_config_dir is set.
-- name: Set openshift_master_config_dir if unset
- set_fact:
- openshift_master_config_dir: '/etc/origin/master'
- when: openshift_master_config_dir is not defined
-
-# This play may be consumed outside the role, we need to ensure that
-# r_openshift_master_data_dir is set.
-- name: Set r_openshift_master_data_dir if unset
- set_fact:
- r_openshift_master_data_dir: "{{ openshift_data_dir | default('/var/lib/origin') }}"
- when: r_openshift_master_data_dir is not defined
-
-- include: registry_auth.yml
-
-- name: Remove the legacy master service if it exists
- include: clean_systemd_units.yml
-
-- name: Init HA Service Info
- set_fact:
- containerized_svc_dir: "/usr/lib/systemd/system"
- ha_svc_template_path: "native-cluster"
+- include: upgrade_facts.yml
+ when: openshift_master_defaults_in_use is not defined
- name: Set HA Service Info for containerized installs
set_fact:
@@ -34,6 +12,25 @@
when:
- openshift.common.is_containerized | bool
+- include: registry_auth.yml
+
+- name: Disable the legacy master service if it exists
+ systemd:
+ name: "{{ openshift.common.service_type }}-master"
+ state: stopped
+ enabled: no
+ masked: yes
+ ignore_errors: true
+
+- name: Remove the legacy master service if it exists
+ file:
+ path: "{{ containerized_svc_dir }}/{{ openshift.common.service_type }}-master.service"
+ state: absent
+ ignore_errors: true
+ when:
+ - openshift.master.cluster_method == "native"
+ - not openshift.common.is_master_system_container | bool
+
# This is the image used for both HA and non-HA clusters:
- name: Pre-pull master image
command: >
diff --git a/roles/openshift_master/tasks/upgrade_facts.yml b/roles/openshift_master/tasks/upgrade_facts.yml
new file mode 100644
index 000000000..2252c003a
--- /dev/null
+++ b/roles/openshift_master/tasks/upgrade_facts.yml
@@ -0,0 +1,37 @@
+---
+# This file exists because we call systemd_units.yml from outside of the role
+# during upgrades. When we remove this pattern, we can probably
+# eliminate most of these set_fact items.
+
+- name: Set openshift_master_config_dir if unset
+ set_fact:
+ openshift_master_config_dir: '/etc/origin/master'
+ when: openshift_master_config_dir is not defined
+
+- name: Set r_openshift_master_data_dir if unset
+ set_fact:
+ r_openshift_master_data_dir: "{{ openshift_data_dir | default('/var/lib/origin') }}"
+ when: r_openshift_master_data_dir is not defined
+
+- set_fact:
+ oreg_auth_credentials_path: "{{ r_openshift_master_data_dir }}/.docker"
+ when: oreg_auth_credentials_path is not defined
+
+- set_fact:
+ oreg_host: "{{ oreg_url.split('/')[0] if (oreg_url is defined and '.' in oreg_url.split('/')[0]) else '' }}"
+ when: oreg_host is not defined
+
+- set_fact:
+ oreg_auth_credentials_replace: False
+ when: oreg_auth_credentials_replace is not defined
+
+- name: Set openshift_master_debug_level
+ set_fact:
+ openshift_master_debug_level: "{{ debug_level | default(2) }}"
+ when:
+ - openshift_master_debug_level is not defined
+
+- name: Init HA Service Info
+ set_fact:
+ containerized_svc_dir: "{{ containerized_svc_dir | default('/usr/lib/systemd/system') }}"
+ ha_svc_template_path: "{{ ha_svc_template_path | default('native-cluster') }}"
diff --git a/roles/openshift_master/templates/atomic-openshift-master.j2 b/roles/openshift_master/templates/atomic-openshift-master.j2
index b931f1414..3f7a528a9 100644
--- a/roles/openshift_master/templates/atomic-openshift-master.j2
+++ b/roles/openshift_master/templates/atomic-openshift-master.j2
@@ -1,4 +1,4 @@
-OPTIONS=--loglevel={{ openshift.master.debug_level | default(2) }}
+OPTIONS=--loglevel={{ openshift_master_debug_level }}
CONFIG_FILE={{ openshift_master_config_file }}
{# Preserve existing OPENSHIFT_DEFAULT_REGISTRY settings in scale up runs #}
{% if openshift_master_is_scaleup_host %}
@@ -21,7 +21,7 @@ AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key }}
{% endif %}
{% if 'api_env_vars' in openshift.master or 'controllers_env_vars' in openshift.master -%}
-{% for key, value in openshift.master.api_env_vars.items() | default([]) | union(openshift.master.controllers_env_vars.items() | default([])) -%}
+{% for key, value in (openshift.master.api_env_vars | default({})).items() | union((openshift.master.controllers_env_vars | default({})).items()) -%}
{{ key }}={{ value }}
{% endfor -%}
{% endif -%}
diff --git a/roles/openshift_master/templates/master.yaml.v1.j2 b/roles/openshift_master/templates/master.yaml.v1.j2
index d045b402b..c83fc9fbb 100644
--- a/roles/openshift_master/templates/master.yaml.v1.j2
+++ b/roles/openshift_master/templates/master.yaml.v1.j2
@@ -58,11 +58,12 @@ controllerConfig:
{% endif %}
controllers: '*'
corsAllowedOrigins:
+ # Anchor with start (\A) and end (\z) of the string, make the check case-insensitive ((?i)), and escape the hostname
{% for origin in ['127.0.0.1', 'localhost', openshift.common.ip, openshift.common.public_ip] | union(openshift.common.all_hostnames) | unique %}
- - {{ origin }}
+ - (?i)\A{{ origin | regex_escape() }}\z
{% endfor %}
{% for custom_origin in openshift.master.custom_cors_origins | default("") %}
- - {{ custom_origin }}
+ - (?i)\A{{ custom_origin | regex_escape() }}\z
{% endfor %}
{% if 'disabled_features' in openshift.master %}
disabledFeatures: {{ openshift.master.disabled_features | to_json }}
@@ -115,7 +116,7 @@ etcdStorageConfig:
openShiftStorageVersion: v1
imageConfig:
format: {{ openshift.master.registry_url }}
- latest: false
+ latest: {{ openshift_master_image_config_latest }}
{% if 'image_policy_config' in openshift.master %}
imagePolicyConfig:{{ openshift.master.image_policy_config | to_padded_yaml(level=1) }}
{% endif %}
@@ -179,7 +180,12 @@ masterPublicURL: {{ openshift.master.public_api_url }}
networkConfig:
clusterNetworkCIDR: {{ openshift.master.sdn_cluster_network_cidr }}
hostSubnetLength: {{ openshift.master.sdn_host_subnet_length }}
-{% if r_openshift_master_use_openshift_sdn or r_openshift_master_use_nuage or r_openshift_master_use_contiv or r_openshift_master_sdn_network_plugin_name == 'cni' %}
+{% if openshift.common.version_gte_3_7 | bool %}
+ clusterNetworks:
+ - cidr: {{ openshift.master.sdn_cluster_network_cidr }}
+ hostSubnetLength: {{ openshift.master.sdn_host_subnet_length }}
+{% endif %}
+{% if r_openshift_master_use_openshift_sdn or r_openshift_master_use_nuage or r_openshift_master_use_contiv or r_openshift_master_use_kuryr or r_openshift_master_sdn_network_plugin_name == 'cni' %}
networkPluginName: {{ r_openshift_master_sdn_network_plugin_name_default }}
{% endif %}
# serviceNetworkCIDR must match kubernetesMasterConfig.servicesSubnet
@@ -251,11 +257,7 @@ servingInfo:
bindAddress: {{ openshift.master.bind_addr }}:{{ openshift.master.api_port }}
bindNetwork: tcp4
certFile: master.server.crt
-{% if openshift.common.version_gte_3_2_or_1_2 | bool %}
- clientCA: ca-bundle.crt
-{% else %}
clientCA: ca.crt
-{% endif %}
keyFile: master.server.key
maxRequestsInFlight: {{ openshift.master.max_requests_inflight }}
requestTimeoutSeconds: 3600
@@ -279,12 +281,5 @@ servingInfo:
- {{ cipher_suite }}
{% endfor %}
{% endif %}
-{% if openshift_template_service_broker_namespaces is defined %}
-templateServiceBrokerConfig:
- templateNamespaces:
-{% for namespace in openshift_template_service_broker_namespaces %}
- - {{ namespace }}
-{% endfor %}
-{% endif %}
volumeConfig:
dynamicProvisioningEnabled: {{ openshift.master.dynamic_provisioning_enabled }}
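The corsAllowedOrigins change above turns each origin into an escaped, anchored, case-insensitive regex, so that an origin like `example.com` no longer matches hosts that merely contain it. The effect, demonstrated with Python's regex dialect (where `\Z` plays the role of `\z`):

```python
import re

origin = "master.example.com"
pattern = r"(?i)\A" + re.escape(origin) + r"\Z"

assert re.match(pattern, "MASTER.EXAMPLE.COM")              # case-insensitive
assert not re.match(pattern, "master.example.com.evil.io")  # anchored at end
assert not re.match(pattern, "masterXexample.com")          # dot is escaped
```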
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
index 63eb3ea1b..cc21b37af 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-api.j2
@@ -1,4 +1,4 @@
-OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.api_port }} --master={{ openshift.master.loopback_api_url }}
+OPTIONS=--loglevel={{ openshift_master_debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.api_port }} --master={{ openshift.master.loopback_api_url }}
CONFIG_FILE={{ openshift_master_config_file }}
{# Preserve existing OPENSHIFT_DEFAULT_REGISTRY settings in scale up runs #}
{% if openshift_master_is_scaleup_host %}
diff --git a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2 b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
index 0adfd05b6..493fc510e 100644
--- a/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
+++ b/roles/openshift_master/templates/native-cluster/atomic-openshift-master-controllers.j2
@@ -1,4 +1,4 @@
-OPTIONS=--loglevel={{ openshift.master.debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.controllers_port }}
+OPTIONS=--loglevel={{ openshift_master_debug_level }} --listen={{ 'https' if openshift.master.api_use_ssl else 'http' }}://{{ openshift.master.bind_addr }}:{{ openshift.master.controllers_port }}
CONFIG_FILE={{ openshift_master_config_file }}
{# Preserve existing OPENSHIFT_DEFAULT_REGISTRY settings in scale up runs #}
{% if openshift_master_is_scaleup_host %}
diff --git a/roles/openshift_master_facts/filter_plugins/openshift_master.py b/roles/openshift_master_facts/filter_plugins/openshift_master.py
index f7f3ac2b1..a4f410296 100644
--- a/roles/openshift_master_facts/filter_plugins/openshift_master.py
+++ b/roles/openshift_master_facts/filter_plugins/openshift_master.py
@@ -363,7 +363,6 @@ class OpenIDIdentityProvider(IdentityProviderOauthBase):
def validate(self):
''' validate this idp instance '''
- IdentityProviderOauthBase.validate(self)
if not isinstance(self.provider['claims'], dict):
raise errors.AnsibleFilterError("|failed claims for provider {0} "
"must be a dictionary".format(self.__class__.__name__))
diff --git a/roles/openshift_master_facts/tasks/main.yml b/roles/openshift_master_facts/tasks/main.yml
index a95570d38..cf0be3bef 100644
--- a/roles/openshift_master_facts/tasks/main.yml
+++ b/roles/openshift_master_facts/tasks/main.yml
@@ -34,7 +34,6 @@
cluster_method: "{{ openshift_master_cluster_method | default('native') }}"
cluster_hostname: "{{ openshift_master_cluster_hostname | default(None) }}"
cluster_public_hostname: "{{ openshift_master_cluster_public_hostname | default(None) }}"
- debug_level: "{{ openshift_master_debug_level | default(openshift.common.debug_level) }}"
api_port: "{{ openshift_master_api_port | default(None) }}"
api_url: "{{ openshift_master_api_url | default(None) }}"
api_use_ssl: "{{ openshift_master_api_use_ssl | default(None) }}"
@@ -89,7 +88,6 @@
controller_args: "{{ osm_controller_args | default(None) }}"
disabled_features: "{{ osm_disabled_features | default(None) }}"
master_count: "{{ openshift_master_count | default(None) }}"
- controller_lease_ttl: "{{ osm_controller_lease_ttl | default(None) }}"
master_image: "{{ osm_image | default(None) }}"
admission_plugin_config: "{{openshift_master_admission_plugin_config }}"
kube_admission_plugin_config: "{{openshift_master_kube_admission_plugin_config | default(None) }}" # deprecated, merged with admission_plugin_config
diff --git a/roles/openshift_metrics/README.md b/roles/openshift_metrics/README.md
index ed698daca..b74f22c00 100644
--- a/roles/openshift_metrics/README.md
+++ b/roles/openshift_metrics/README.md
@@ -109,3 +109,78 @@ Author Information
------------------
Jose David Martín (j.david.nieto@gmail.com)
+
+Image update procedure
+----------------------
+Upgrading the metrics stack from an older version to a newer one is an automated process; perform it by running the appropriate Ansible playbook and setting the required Ansible variables in your inventory, as documented at https://docs.openshift.org/.
+
+The following text describes how to manually update the metrics images without a version upgrade. To determine the current version of the images in use, run:
+```
+oc describe pod | grep 'Image ID:'
+```
+This prints the repo digest, which can later be compared against the inspected image details.
+
+To determine when your image was last updated:
+```
+$ docker images
+REPOSITORY TAG IMAGE ID CREATED SIZE
+<registry>/openshift3/origin-metrics-cassandra v3.7 f8ad8d569e27 14 hours ago 783.7 MB
+
+$ docker inspect f8ad8d569e27
+[
+ {
+ . . .
+ "RepoDigests": [
+ "<registry>/openshift3/metrics-cassandra@sha256:d37fc0cab268625b53a92bb98d09fcc501cfca1c68e16bac6dd98446d32ba135
+ ],
+ . . .
+ "Config": {
+ . . .
+ "Labels": {
+ . . .
+ "build-date": "2017-10-17T16:47:44.350655",
+ . . .
+ "release": "0.143.4.0",
+ . . .
+ "url": "https://access.redhat.com/containers/#/registry.access.redhat.com/openshift3/metrics-cassandra/images/v3.7.0-0.143.4.0",
+ . . .
+ "version": "v3.7.0"
+ }
+ },
+ . . .
+```
+
+Pull the image again to see whether the registry has a newer image with the same tag:
+```
+$ docker pull <registry>/openshift3/origin-metrics-cassandra:v3.7
+```
+
+If there was an update, you need to run `docker pull` on each node.
+
+It is recommended that you now rerun the `openshift_metrics` playbook to ensure that any necessary config changes are also picked up.
+
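+A minimal sketch of such a rerun (the inventory and playbook paths are assumptions; both vary by release and installation method):
+```
+# playbook location may differ in your release of openshift-ansible
+$ ansible-playbook -i /etc/ansible/hosts \
+    playbooks/byo/openshift-cluster/openshift_metrics.yml
+```
+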
+To manually redeploy your pods, you can do the following:
+- for a DC (DeploymentConfig):
+```
+oc rollout latest <dc_name>
+```
+
+- for an RC (ReplicationController), scale down and then back up:
+```
+oc scale --replicas=0 rc/<rc_name>
+
+... wait for the scale down to complete
+
+oc scale --replicas=<original_replica_count> rc/<rc_name>
+```
+
+- for a DS (DaemonSet), delete the pods (or unlabel and relabel your node):
+```
+oc delete pod --selector=<ds_selector>
+```
+
+Changelog
+---------
+
+Tue Oct 10, 2017
+- Default imagePullPolicy changed from Always to IfNotPresent
diff --git a/roles/openshift_metrics/tasks/install_cassandra.yaml b/roles/openshift_metrics/tasks/install_cassandra.yaml
index 7928a0346..48584bd64 100644
--- a/roles/openshift_metrics/tasks/install_cassandra.yaml
+++ b/roles/openshift_metrics/tasks/install_cassandra.yaml
@@ -54,6 +54,7 @@
access_modes: "{{ openshift_metrics_cassandra_pvc_access | list }}"
size: "{{ openshift_metrics_cassandra_pvc_size }}"
pv_selector: "{{ openshift_metrics_cassandra_pv_selector }}"
+ storage_class_name: "{{ openshift_metrics_cassanda_pvc_storage_class_name | default('', true) }}"
with_sequence: count={{ openshift_metrics_cassandra_replicas }}
when: openshift_metrics_cassandra_storage_type == 'dynamic'
changed_when: false
diff --git a/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2 b/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2
index 6f341bcfb..6a3811598 100644
--- a/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2
+++ b/roles/openshift_metrics/templates/hawkular_cassandra_rc.j2
@@ -30,7 +30,7 @@ spec:
{% endif %}
containers:
- image: "{{ openshift_metrics_image_prefix }}metrics-cassandra:{{ openshift_metrics_image_version }}"
- imagePullPolicy: Always
+ imagePullPolicy: IfNotPresent
name: hawkular-cassandra-{{ node }}
ports:
- name: cql-port
diff --git a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2 b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
index 59f7fb44a..0662bea53 100644
--- a/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
+++ b/roles/openshift_metrics/templates/hawkular_metrics_rc.j2
@@ -25,7 +25,7 @@ spec:
{% endif %}
containers:
- image: {{openshift_metrics_image_prefix}}metrics-hawkular-metrics:{{openshift_metrics_image_version}}
- imagePullPolicy: Always
+ imagePullPolicy: IfNotPresent
name: hawkular-metrics
ports:
- name: http-endpoint
diff --git a/roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j2 b/roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j2
index d65eaf9ae..40d09e9fa 100644
--- a/roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j2
+++ b/roles/openshift_metrics/templates/hawkular_openshift_agent_ds.j2
@@ -25,7 +25,7 @@ spec:
{% endif %}
containers:
- image: {{openshift_metrics_image_prefix}}metrics-hawkular-openshift-agent:{{openshift_metrics_image_version}}
- imagePullPolicy: Always
+ imagePullPolicy: IfNotPresent
name: hawkular-openshift-agent
{% if ((openshift_metrics_hawkular_agent_limits_cpu is defined and openshift_metrics_hawkular_agent_limits_cpu is not none)
or (openshift_metrics_hawkular_agent_limits_memory is defined and openshift_metrics_hawkular_agent_limits_memory is not none)
diff --git a/roles/openshift_metrics/templates/heapster.j2 b/roles/openshift_metrics/templates/heapster.j2
index d8c7763ea..e732c1eee 100644
--- a/roles/openshift_metrics/templates/heapster.j2
+++ b/roles/openshift_metrics/templates/heapster.j2
@@ -27,7 +27,7 @@ spec:
containers:
- name: heapster
image: {{openshift_metrics_image_prefix}}metrics-heapster:{{openshift_metrics_image_version}}
- imagePullPolicy: Always
+ imagePullPolicy: IfNotPresent
ports:
- containerPort: 8082
name: "http-endpoint"
diff --git a/roles/openshift_named_certificates/tasks/named_certificates.yml b/roles/openshift_named_certificates/tasks/named_certificates.yml
deleted file mode 100644
index 7b097b443..000000000
--- a/roles/openshift_named_certificates/tasks/named_certificates.yml
+++ /dev/null
@@ -1,32 +0,0 @@
----
-- name: Clear named certificates
- file:
- path: "{{ named_certs_dir }}"
- state: absent
- when: overwrite_named_certs | bool
-
-- name: Ensure named certificate directory exists
- file:
- path: "{{ named_certs_dir }}"
- state: directory
- mode: 0700
-
-- name: Land named certificates
- copy:
- src: "{{ item.certfile }}"
- dest: "{{ named_certs_dir }}"
- with_items: "{{ openshift_master_named_certificates | default([]) }}"
-
-- name: Land named certificate keys
- copy:
- src: "{{ item.keyfile }}"
- dest: "{{ named_certs_dir }}"
- mode: 0600
- with_items: "{{ openshift_master_named_certificates | default([]) }}"
-
-- name: Land named CA certificates
- copy:
- src: "{{ item }}"
- dest: "{{ named_certs_dir }}"
- mode: 0600
- with_items: "{{ openshift_master_named_certificates | default([]) | oo_collect('cafile') }}"
diff --git a/roles/openshift_nfs/README.md b/roles/openshift_nfs/README.md
new file mode 100644
index 000000000..36ea36385
--- /dev/null
+++ b/roles/openshift_nfs/README.md
@@ -0,0 +1,17 @@
+OpenShift NFS
+=============
+
+Sets up basic NFS services on a cluster host.
+
+See [tasks/create_export.yml](tasks/create_export.yml) for
+instructions on using the export creation tasks file.
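+
+A minimal usage sketch, based on the include signature documented in that
+file (all paths and names below are illustrative):
+
+```
+- include_role:
+    name: openshift_nfs
+    tasks_from: create_export
+  vars:
+    l_nfs_base_dir: /exports
+    l_nfs_export_config: openshift_metrics
+    l_nfs_export_name: metrics
+    l_nfs_options: '*(rw,root_squash)'
+```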
+
+License
+-------
+
+Apache License, Version 2.0
+
+Author Information
+------------------
+
+Tim Bielawa (tbielawa@redhat.com)
diff --git a/roles/openshift_nfs/defaults/main.yml b/roles/openshift_nfs/defaults/main.yml
new file mode 100644
index 000000000..ee94c7c57
--- /dev/null
+++ b/roles/openshift_nfs/defaults/main.yml
@@ -0,0 +1,8 @@
+---
+r_openshift_nfs_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
+r_openshift_nfs_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
+
+r_openshift_nfs_os_firewall_deny: []
+r_openshift_nfs_firewall_allow:
+- service: nfs
+ port: "2049/tcp"
diff --git a/roles/openshift_nfs/meta/main.yml b/roles/openshift_nfs/meta/main.yml
new file mode 100644
index 000000000..d7b5910f2
--- /dev/null
+++ b/roles/openshift_nfs/meta/main.yml
@@ -0,0 +1,16 @@
+---
+galaxy_info:
+ author: Tim Bielawa
+ description: OpenShift Basic NFS Configuration
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.2
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
+dependencies:
+- role: lib_utils
+- role: lib_os_firewall
diff --git a/roles/openshift_nfs/tasks/create_export.yml b/roles/openshift_nfs/tasks/create_export.yml
new file mode 100644
index 000000000..b0b888d56
--- /dev/null
+++ b/roles/openshift_nfs/tasks/create_export.yml
@@ -0,0 +1,34 @@
+---
+# Makes a new NFS export
+#
+# Include signature
+#
+# include_role:
+#   name: openshift_nfs
+# tasks_from: create_export
+# vars:
+# l_nfs_base_dir: Base dir to exports
+# l_nfs_export_config: Name to prefix the .exports file with
+# l_nfs_export_name: Name of sub-directory of the export
+# l_nfs_options: Mount Options
+
+- name: "Ensure {{ l_nfs_export_name }} NFS export directory exists"
+ file:
+ path: "{{ l_nfs_base_dir }}/{{ l_nfs_export_name }}"
+ state: directory
+ mode: 0777
+ owner: nfsnobody
+ group: nfsnobody
+
+- name: "Create {{ l_nfs_export_name }} NFS export"
+ lineinfile:
+ path: "/etc/exports.d/{{ l_nfs_export_config }}.exports"
+ create: true
+ state: present
+ line: "{{ l_nfs_base_dir }}/{{ l_nfs_export_name }} {{ l_nfs_options }}"
+ register: created_export
+
+- name: Re-export NFS filesystems
+ command: exportfs -ar
+ when:
+ - created_export | changed
diff --git a/roles/openshift_nfs/tasks/firewall.yml b/roles/openshift_nfs/tasks/firewall.yml
new file mode 100644
index 000000000..0898b2b5c
--- /dev/null
+++ b/roles/openshift_nfs/tasks/firewall.yml
@@ -0,0 +1,40 @@
+---
+- when: r_openshift_nfs_firewall_enabled | bool and not r_openshift_nfs_use_firewalld | bool
+ block:
+ - name: Add iptables allow rules
+ os_firewall_manage_iptables:
+ name: "{{ item.service }}"
+ action: add
+ protocol: "{{ item.port.split('/')[1] }}"
+ port: "{{ item.port.split('/')[0] }}"
+ when: item.cond | default(True)
+ with_items: "{{ r_openshift_nfs_firewall_allow }}"
+
+ - name: Remove iptables rules
+ os_firewall_manage_iptables:
+ name: "{{ item.service }}"
+ action: remove
+ protocol: "{{ item.port.split('/')[1] }}"
+ port: "{{ item.port.split('/')[0] }}"
+ when: item.cond | default(True)
+ with_items: "{{ r_openshift_nfs_os_firewall_deny }}"
+
+- when: r_openshift_nfs_firewall_enabled | bool and r_openshift_nfs_use_firewalld | bool
+ block:
+ - name: Add firewalld allow rules
+ firewalld:
+ port: "{{ item.port }}"
+ permanent: true
+ immediate: true
+ state: enabled
+ when: item.cond | default(True)
+ with_items: "{{ r_openshift_nfs_firewall_allow }}"
+
+ - name: Remove firewalld allow rules
+ firewalld:
+ port: "{{ item.port }}"
+ permanent: true
+ immediate: true
+ state: disabled
+ when: item.cond | default(True)
+ with_items: "{{ r_openshift_nfs_os_firewall_deny }}"
diff --git a/roles/openshift_nfs/tasks/setup.yml b/roles/openshift_nfs/tasks/setup.yml
new file mode 100644
index 000000000..3070de495
--- /dev/null
+++ b/roles/openshift_nfs/tasks/setup.yml
@@ -0,0 +1,29 @@
+---
+- name: setup firewall
+ include: firewall.yml
+ static: yes
+
+- name: Install nfs-utils
+ package: name=nfs-utils state=present
+
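+# rpc.nfsd's -N flag disables the given NFS version; "-N 2 -N 3" leaves
+# only NFSv4 enabled.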
+- name: Configure NFS
+ lineinfile:
+ dest: /etc/sysconfig/nfs
+ regexp: '^RPCNFSDARGS=.*$'
+ line: 'RPCNFSDARGS="-N 2 -N 3"'
+ register: nfs_config
+
+- name: Restart nfs-config
+ systemd: name=nfs-config state=restarted
+ when: nfs_config | changed
+
+- name: Ensure exports directory exists
+ file:
+ path: "{{ l_nfs_base_dir }}"
+ state: directory
+
+- name: Enable and start NFS services
+ systemd:
+ name: nfs-server
+ state: started
+ enabled: yes
diff --git a/roles/openshift_node/defaults/main.yml b/roles/openshift_node/defaults/main.yml
index 1214c08e5..37f48e724 100644
--- a/roles/openshift_node/defaults/main.yml
+++ b/roles/openshift_node/defaults/main.yml
@@ -1,12 +1,15 @@
---
+openshift_node_debug_level: "{{ debug_level | default(2) }}"
+
r_openshift_node_firewall_enabled: "{{ os_firewall_enabled | default(True) }}"
r_openshift_node_use_firewalld: "{{ os_firewall_use_firewalld | default(False) }}"
-openshift_service_type: "{{ openshift.common.service_type }}"
+openshift_deployment_type: "{{ openshift_deployment_type | default('origin') }}"
+openshift_service_type: "{{ 'origin' if openshift_deployment_type == 'origin' else 'atomic-openshift' }}"
openshift_image_tag: ''
-openshift_node_ami_prep_packages:
+default_r_openshift_node_image_prep_packages:
- "{{ openshift_service_type }}-master"
- "{{ openshift_service_type }}-node"
- "{{ openshift_service_type }}-docker-excluder"
@@ -15,7 +18,6 @@ openshift_node_ami_prep_packages:
- openvswitch
- docker
- etcd
-#- pcs
- haproxy
- dnsmasq
- ntp
@@ -31,7 +33,6 @@ openshift_node_ami_prep_packages:
- python-dbus
- PyYAML
- yum-utils
-- cloud-utils-growpart
# gluster
- glusterfs-fuse
# nfs
@@ -52,7 +53,7 @@ openshift_node_ami_prep_packages:
# - container-selinux
# - atomic
#
-openshift_deployment_type: origin
+r_openshift_node_image_prep_packages: "{{ default_r_openshift_node_image_prep_packages | union(openshift_node_image_prep_packages | default([])) }}"
openshift_node_bootstrap: False
@@ -103,5 +104,11 @@ openshift_node_use_nuage: "{{ openshift_node_use_nuage_default }}"
openshift_node_use_contiv_default: "{{ openshift_use_contiv | default(False) }}"
openshift_node_use_contiv: "{{ openshift_node_use_contiv_default }}"
+openshift_node_use_kuryr_default: "{{ openshift_use_kuryr | default(False) }}"
+openshift_node_use_kuryr: "{{ openshift_node_use_kuryr_default }}"
+
openshift_node_data_dir_default: "{{ openshift_data_dir | default('/var/lib/origin') }}"
openshift_node_data_dir: "{{ openshift_node_data_dir_default }}"
+
+openshift_node_image_config_latest_default: "{{ openshift_image_config_latest | default(False) }}"
+openshift_node_image_config_latest: "{{ openshift_node_image_config_latest_default }}"
diff --git a/roles/openshift_node/files/bootstrap.yml b/roles/openshift_node/files/bootstrap.yml
new file mode 100644
index 000000000..ea280640f
--- /dev/null
+++ b/roles/openshift_node/files/bootstrap.yml
@@ -0,0 +1,63 @@
+#!/usr/bin/ansible-playbook
+---
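+# This playbook runs on the node itself at first boot: it is laid down to
+# /root/openshift_bootstrap/bootstrap.yml by the bootstrap tasks below and
+# finishes dnsmasq and node sysconfig setup before the node service starts.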
+- hosts: localhost
+ gather_facts: yes
+ vars:
+ origin_dns:
+ file: /etc/dnsmasq.d/origin-dns.conf
+ lines:
+ - regex: ^listen-address
+ state: present
+ line: "listen-address={{ ansible_default_ipv4.address }}"
+ node_dns:
+ file: /etc/dnsmasq.d/node-dnsmasq.conf
+ lines:
+ - regex: "^server=/in-addr.arpa/127.0.0.1$"
+ line: server=/in-addr.arpa/127.0.0.1
+ - regex: "^server=/cluster.local/127.0.0.1$"
+ line: server=/cluster.local/127.0.0.1
+
+ tasks:
+ - include_vars: openshift_settings.yaml
+
+ - name: set the data for node_dns
+ lineinfile:
+ create: yes
+ insertafter: EOF
+ path: "{{ node_dns.file }}"
+ regexp: "{{ item.regex }}"
+ line: "{{ item.line | default(omit) }}"
+ with_items: "{{ node_dns.lines }}"
+
+ - name: set the data for origin_dns
+ lineinfile:
+ create: yes
+ state: "{{ item.state | default('present') }}"
+ insertafter: "{{ item.after | default(omit) }}"
+ path: "{{ origin_dns.file }}"
+ regexp: "{{ item.regex }}"
+ line: "{{ item.line | default(omit)}}"
+ with_items: "{{ origin_dns.lines }}"
+
+ - when:
+ - openshift_group_type is defined
+ - openshift_group_type != ''
+ - openshift_group_type != 'master'
+ block:
+ - name: determine the openshift_service_type
+ stat:
+ path: /etc/sysconfig/atomic-openshift-node
+ register: service_type_results
+
+ - name: set openshift_service_type fact based on stat results
+ set_fact:
+ openshift_service_type: "{{ service_type_results.stat.exists | ternary('atomic-openshift', 'origin') }}"
+
+ - name: update the sysconfig to have necessary variables
+ lineinfile:
+ dest: "/etc/sysconfig/{{ openshift_service_type }}-node"
+ line: "{{ item.line }}"
+ regexp: "{{ item.regexp }}"
+ with_items:
+ - line: "BOOTSTRAP_CONFIG_NAME=node-config-{{ openshift_group_type }}"
+ regexp: "^BOOTSTRAP_CONFIG_NAME=.*"
diff --git a/roles/openshift_node/handlers/main.yml b/roles/openshift_node/handlers/main.yml
index 25a6fc721..b102c1b18 100644
--- a/roles/openshift_node/handlers/main.yml
+++ b/roles/openshift_node/handlers/main.yml
@@ -3,7 +3,11 @@
systemd:
name: openvswitch
state: restarted
- when: (not skip_node_svc_handlers | default(False) | bool) and not (ovs_service_status_changed | default(false) | bool) and openshift_node_use_openshift_sdn | bool
+ when:
+ - (not skip_node_svc_handlers | default(False) | bool)
+ - not (ovs_service_status_changed | default(false) | bool)
+ - openshift_node_use_openshift_sdn | bool
+ - not openshift_node_bootstrap
register: l_openshift_node_stop_openvswitch_result
until: not l_openshift_node_stop_openvswitch_result | failed
retries: 3
@@ -11,10 +15,11 @@
notify:
- restart openvswitch pause
-
- name: restart openvswitch pause
pause: seconds=15
- when: (not skip_node_svc_handlers | default(False) | bool) and openshift.common.is_containerized | bool
+ when:
+ - (not skip_node_svc_handlers | default(False) | bool)
+ - openshift.common.is_containerized | bool
- name: restart node
systemd:
diff --git a/roles/openshift_node/tasks/aws.yml b/roles/openshift_node/tasks/aws.yml
new file mode 100644
index 000000000..38c2b794d
--- /dev/null
+++ b/roles/openshift_node/tasks/aws.yml
@@ -0,0 +1,21 @@
+---
+- name: Configure AWS Cloud Provider Settings
+ lineinfile:
+ dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
+ regexp: "{{ item.regex }}"
+ line: "{{ item.line }}"
+ create: true
+ with_items:
+ - regex: '^AWS_ACCESS_KEY_ID='
+ line: "AWS_ACCESS_KEY_ID={{ openshift_cloudprovider_aws_access_key | default('') }}"
+ - regex: '^AWS_SECRET_ACCESS_KEY='
+ line: "AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key | default('') }}"
+ register: sys_env_update
+ no_log: True
+ when:
+ - openshift_cloudprovider_kind is defined
+ - openshift_cloudprovider_kind == 'aws'
+ - openshift_cloudprovider_aws_access_key is defined
+ - openshift_cloudprovider_aws_secret_key is defined
+ notify:
+ - restart node
diff --git a/roles/openshift_node/tasks/bootstrap.yml b/roles/openshift_node/tasks/bootstrap.yml
index b83b2c452..cf22181a8 100644
--- a/roles/openshift_node/tasks/bootstrap.yml
+++ b/roles/openshift_node/tasks/bootstrap.yml
@@ -3,7 +3,7 @@
package:
name: "{{ item }}"
state: present
- with_items: "{{ openshift_node_ami_prep_packages }}"
+ with_items: "{{ r_openshift_node_image_prep_packages }}"
- name: create the directory for node
file:
@@ -17,19 +17,31 @@
[Unit]
After=cloud-init.service
-- name: update the sysconfig to have KUBECONFIG
+- name: update the sysconfig to have necessary variables
lineinfile:
dest: "/etc/sysconfig/{{ openshift_service_type }}-node"
- line: "KUBECONFIG=/root/csr_kubeconfig"
+ line: "{{ item.line | default(omit) }}"
+ regexp: "{{ item.regexp }}"
+ state: "{{ item.state | default('present') }}"
+ with_items:
+ # add the kubeconfig
+ - line: "KUBECONFIG=/etc/origin/node/bootstrap.kubeconfig"
regexp: "^KUBECONFIG=.*"
+ # remove the config file. This comes from openshift_facts
+ - regexp: "^CONFIG_FILE=.*"
+ state: absent
-- name: update the ExecStart to have bootstrap
- lineinfile:
- dest: "/usr/lib/systemd/system/{{ openshift_service_type }}-node.service"
- line: "{% raw %}ExecStart=/usr/bin/openshift start node --bootstrap --kubeconfig=${KUBECONFIG} $OPTIONS{% endraw %}"
- regexp: "^ExecStart=.*"
+- name: include aws sysconfig credentials
+ include: aws.yml
+ static: yes
+
+#- name: update the ExecStart to have bootstrap
+# lineinfile:
+# dest: "/usr/lib/systemd/system/{{ openshift_service_type }}-node.service"
+# line: "{% raw %}ExecStart=/usr/bin/openshift start node --bootstrap --kubeconfig=${KUBECONFIG} $OPTIONS{% endraw %}"
+# regexp: "^ExecStart=.*"
-- name: "systemctl enable {{ openshift_service_type }}-node"
+- name: "disable {{ openshift_service_type }}-node and {{ openshift_service_type }}-master services"
systemd:
name: "{{ item }}"
enabled: no
@@ -42,6 +54,30 @@
path: /etc/origin/.config_managed
register: rpmgenerated_config
+- name: create directories for bootstrapping
+ file:
+ state: directory
+ dest: "{{ item }}"
+ with_items:
+ - /root/openshift_bootstrap
+ - /var/lib/origin/openshift.local.config
+ - /var/lib/origin/openshift.local.config/node
+ - "/etc/docker/certs.d/docker-registry.default.svc:5000"
+
+- name: lay down the bootstrap.yml file for on-boot configuration
+ copy:
+ src: bootstrap.yml
+ dest: /root/openshift_bootstrap/bootstrap.yml
+
+- name: symlink master ca for docker-registry
+ file:
+ src: "{{ item }}"
+ dest: "/etc/docker/certs.d/docker-registry.default.svc:5000/{{ item | basename }}"
+ state: link
+ force: yes
+ with_items:
+ - /var/lib/origin/openshift.local.config/node/node-client-ca.crt
+
- when: rpmgenerated_config.stat.exists
block:
- name: Remove RPM generated config files if present
@@ -50,6 +86,7 @@
state: absent
with_items:
- master
+ - .config_managed
# with_fileglob doesn't work correctly due to a few issues.
# Could change this to fileglob when it gets fixed.
@@ -62,5 +99,7 @@
file:
path: "{{ item.path }}"
state: absent
- when: "'resolv.conf' not in item.path or 'node-dnsmasq.conf' not in item.path"
+ when:
+ - "'resolv.conf' not in item.path"
+ - "'node-dnsmasq.conf' not in item.path"
with_items: "{{ find_results.files }}"
diff --git a/roles/openshift_node/tasks/config.yml b/roles/openshift_node/tasks/config.yml
index e3898b520..c08f43118 100644
--- a/roles/openshift_node/tasks/config.yml
+++ b/roles/openshift_node/tasks/config.yml
@@ -46,26 +46,16 @@
notify:
- restart node
-- name: Configure AWS Cloud Provider Settings
- lineinfile:
- dest: /etc/sysconfig/{{ openshift.common.service_type }}-node
- regexp: "{{ item.regex }}"
- line: "{{ item.line }}"
- create: true
- with_items:
- - regex: '^AWS_ACCESS_KEY_ID='
- line: "AWS_ACCESS_KEY_ID={{ openshift_cloudprovider_aws_access_key | default('') }}"
- - regex: '^AWS_SECRET_ACCESS_KEY='
- line: "AWS_SECRET_ACCESS_KEY={{ openshift_cloudprovider_aws_secret_key | default('') }}"
- no_log: True
- when: openshift_cloudprovider_kind is defined and openshift_cloudprovider_kind == 'aws' and openshift_cloudprovider_aws_access_key is defined and openshift_cloudprovider_aws_secret_key is defined
- notify:
- - restart node
+- name: include aws provider credentials
+ include: aws.yml
+ static: yes
# Necessary because when you're on a node that's also a master the master will be
# restarted after the node restarts docker and it will take up to 60 seconds for
# systemd to start the master again
-- when: openshift.common.is_containerized | bool
+- when:
+ - openshift.common.is_containerized | bool
+ - not openshift_node_bootstrap
block:
- name: Wait for master API to become available before proceeding
# Using curl here since the uri module requires python-httplib2 and
@@ -90,30 +80,28 @@
enabled: yes
state: started
-- name: Start and enable node
- systemd:
- name: "{{ openshift.common.service_type }}-node"
- enabled: yes
- state: started
- daemon_reload: yes
- register: node_start_result
- until: not node_start_result | failed
- retries: 1
- delay: 30
- ignore_errors: true
-
-- name: Dump logs from node service if it failed
- command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-node
- when: node_start_result | failed
+- when: not openshift_node_bootstrap
+ block:
+ - name: Start and enable node
+ systemd:
+ name: "{{ openshift.common.service_type }}-node"
+ enabled: yes
+ state: started
+ daemon_reload: yes
+ register: node_start_result
+ until: not node_start_result | failed
+ retries: 1
+ delay: 30
+ ignore_errors: true
-- name: Abort if node failed to start
- fail:
- msg: Node failed to start please inspect the logs and try again
- when: node_start_result | failed
+ - name: Dump logs from node service if it failed
+ command: journalctl --no-pager -n 100 -u {{ openshift.common.service_type }}-node
+ when: node_start_result | failed
-- name: Setup tuned
- include: tuned.yml
- static: yes
+ - name: Abort if node failed to start
+ fail:
+      msg: Node failed to start; please inspect the logs and try again
+ when: node_start_result | failed
-- set_fact:
- node_service_status_changed: "{{ node_start_result | changed }}"
+ - set_fact:
+ node_service_status_changed: "{{ node_start_result | changed }}"
diff --git a/roles/openshift_node/tasks/config/configure-node-settings.yml b/roles/openshift_node/tasks/config/configure-node-settings.yml
index 1186062eb..527580481 100644
--- a/roles/openshift_node/tasks/config/configure-node-settings.yml
+++ b/roles/openshift_node/tasks/config/configure-node-settings.yml
@@ -7,7 +7,7 @@
create: true
with_items:
- regex: '^OPTIONS='
- line: "OPTIONS=--loglevel={{ openshift.node.debug_level | default(2) }}"
+ line: "OPTIONS=--loglevel={{ openshift_node_debug_level }}"
- regex: '^CONFIG_FILE='
line: "CONFIG_FILE={{ openshift.common.config_base }}/node/node-config.yaml"
- regex: '^IMAGE_VERSION='
diff --git a/roles/openshift_node/tasks/config/install-node-docker-service-file.yml b/roles/openshift_node/tasks/config/install-node-docker-service-file.yml
deleted file mode 100644
index f92ff79b5..000000000
--- a/roles/openshift_node/tasks/config/install-node-docker-service-file.yml
+++ /dev/null
@@ -1,8 +0,0 @@
----
-- name: Install Node docker service file
- template:
- dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
- src: openshift.docker.node.service
- notify:
- - reload systemd units
- - restart node
diff --git a/roles/openshift_node/tasks/install.yml b/roles/openshift_node/tasks/install.yml
index 265bf2c46..6b7e40491 100644
--- a/roles/openshift_node/tasks/install.yml
+++ b/roles/openshift_node/tasks/install.yml
@@ -3,12 +3,12 @@
block:
- name: Install Node package
package:
- name: "{{ openshift.common.service_type }}-node{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ name: "{{ openshift.common.service_type }}-node{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
- name: Install sdn-ovs package
package:
- name: "{{ openshift.common.service_type }}-sdn-ovs{{ openshift_pkg_version | oo_image_tag_to_rpm_version(include_dash=True) }}"
+ name: "{{ openshift.common.service_type }}-sdn-ovs{{ (openshift_pkg_version | default('')) | oo_image_tag_to_rpm_version(include_dash=True) }}"
state: present
when:
- openshift_node_use_openshift_sdn | bool
@@ -27,5 +27,3 @@
docker pull {{ openshift.node.node_image }}:{{ openshift_image_tag }}
register: pull_result
changed_when: "'Downloaded newer image' in pull_result.stdout"
-
- - include: config/install-node-docker-service-file.yml
diff --git a/roles/openshift_node/tasks/main.yml b/roles/openshift_node/tasks/main.yml
index 59b8bb76e..eae9ca7bc 100644
--- a/roles/openshift_node/tasks/main.yml
+++ b/roles/openshift_node/tasks/main.yml
@@ -66,15 +66,10 @@
sysctl_file: "/etc/sysctl.d/99-openshift.conf"
reload: yes
-- name: include bootstrap node config
- include: bootstrap.yml
- when: openshift_node_bootstrap
-
- include: registry_auth.yml
- name: include standard node config
include: config.yml
- when: not openshift_node_bootstrap
#### Storage class plugins here ####
- name: NFS storage plugin configuration
@@ -98,3 +93,7 @@
- include: config/workaround-bz1331590-ovs-oom-fix.yml
when: openshift_node_use_openshift_sdn | default(true) | bool
+
+- name: include bootstrap node config
+ include: bootstrap.yml
+ when: openshift_node_bootstrap
diff --git a/roles/openshift_node/tasks/node_system_container.yml b/roles/openshift_node/tasks/node_system_container.yml
index 20d7a9539..164a79b39 100644
--- a/roles/openshift_node/tasks/node_system_container.yml
+++ b/roles/openshift_node/tasks/node_system_container.yml
@@ -1,4 +1,9 @@
---
+- name: Ensure proxies are in the atomic.conf
+ include_role:
+ name: openshift_atomic
+ tasks_from: proxy
+
- name: Pre-pull node system container image
command: >
atomic pull --storage=ostree {{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.node.node_system_image }}:{{ openshift_image_tag }}
diff --git a/roles/openshift_node/tasks/openvswitch_system_container.yml b/roles/openshift_node/tasks/openvswitch_system_container.yml
index e09063aa5..0f73ce454 100644
--- a/roles/openshift_node/tasks/openvswitch_system_container.yml
+++ b/roles/openshift_node/tasks/openvswitch_system_container.yml
@@ -10,6 +10,11 @@
l_service_name: "{{ openshift.docker.service_name }}"
when: not l_use_crio
+- name: Ensure proxies are in the atomic.conf
+ include_role:
+ name: openshift_atomic
+ tasks_from: proxy
+
- name: Pre-pull OpenVSwitch system container image
command: >
atomic pull --storage=ostree {{ 'docker:' if openshift.common.system_images_registry == 'docker' else openshift.common.system_images_registry + '/' }}{{ openshift.node.ovs_system_image }}:{{ openshift_image_tag }}
diff --git a/roles/openshift_node/tasks/registry_auth.yml b/roles/openshift_node/tasks/registry_auth.yml
index de396fb4b..5e5e4f94a 100644
--- a/roles/openshift_node/tasks/registry_auth.yml
+++ b/roles/openshift_node/tasks/registry_auth.yml
@@ -11,6 +11,9 @@
- oreg_auth_user is defined
- (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
register: node_oreg_auth_credentials_create
+ retries: 3
+ delay: 5
+ until: node_oreg_auth_credentials_create.rc == 0
notify:
- restart node
diff --git a/roles/openshift_node/tasks/systemd_units.yml b/roles/openshift_node/tasks/systemd_units.yml
index 6b4490f61..9c182ade6 100644
--- a/roles/openshift_node/tasks/systemd_units.yml
+++ b/roles/openshift_node/tasks/systemd_units.yml
@@ -1,11 +1,9 @@
---
-# This file is included both in the openshift_master role and in the upgrade
-# playbooks.
- name: Install Node service file
template:
dest: "/etc/systemd/system/{{ openshift.common.service_type }}-node.service"
- src: "node.service.j2"
- when: not openshift.common.is_containerized | bool
+ src: "{{ openshift.common.is_containerized | bool | ternary('openshift.docker.node.service', 'node.service.j2') }}"
+ when: not openshift.common.is_node_system_container | bool
notify:
- reload systemd units
- restart node
diff --git a/roles/openshift_node/templates/node.service.j2 b/roles/openshift_node/templates/node.service.j2
index 0856737f6..7602d8ee6 100644
--- a/roles/openshift_node/templates/node.service.j2
+++ b/roles/openshift_node/templates/node.service.j2
@@ -12,17 +12,17 @@ After=dnsmasq.service
[Service]
Type=notify
-EnvironmentFile=/etc/sysconfig/{{ openshift.common.service_type }}-node
+EnvironmentFile=/etc/sysconfig/{{ openshift_service_type }}-node
Environment=GOTRACEBACK=crash
ExecStartPre=/usr/bin/cp /etc/origin/node/node-dnsmasq.conf /etc/dnsmasq.d/
ExecStartPre=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:/in-addr.arpa/127.0.0.1,/{{ openshift.common.dns_domain }}/127.0.0.1
ExecStopPost=/usr/bin/rm /etc/dnsmasq.d/node-dnsmasq.conf
ExecStopPost=/usr/bin/dbus-send --system --dest=uk.org.thekelleys.dnsmasq /uk/org/thekelleys/dnsmasq uk.org.thekelleys.SetDomainServers array:string:
-ExecStart=/usr/bin/openshift start node --config=${CONFIG_FILE} $OPTIONS
+ExecStart=/usr/bin/openshift start node {% if openshift_node_bootstrap %} --kubeconfig=${KUBECONFIG} --bootstrap-config-name=${BOOTSTRAP_CONFIG_NAME}{% endif %} --config=${CONFIG_FILE} $OPTIONS
LimitNOFILE=65536
LimitCORE=infinity
WorkingDirectory=/var/lib/origin/
-SyslogIdentifier={{ openshift.common.service_type }}-node
+SyslogIdentifier={{ openshift_service_type }}-node
Restart=always
RestartSec=5s
TimeoutStartSec=300
diff --git a/roles/openshift_node/templates/node.yaml.v1.j2 b/roles/openshift_node/templates/node.yaml.v1.j2
index 7049f7189..718d35dca 100644
--- a/roles/openshift_node/templates/node.yaml.v1.j2
+++ b/roles/openshift_node/templates/node.yaml.v1.j2
@@ -13,7 +13,7 @@ dockerConfig:
iptablesSyncPeriod: "{{ openshift.node.iptables_sync_period }}"
imageConfig:
format: {{ openshift.node.registry_url }}
- latest: false
+ latest: {{ openshift_node_image_config_latest }}
kind: NodeConfig
kubeletArguments: {{ openshift.node.kubelet_args | default(None) | to_padded_yaml(level=1) }}
{% if openshift_use_crio | default(False) %}
@@ -44,7 +44,7 @@ networkPluginName: {{ openshift_node_sdn_network_plugin_name }}
# deprecates networkPluginName above. The two should match.
networkConfig:
mtu: {{ openshift.node.sdn_mtu }}
-{% if openshift_node_use_openshift_sdn | bool or openshift_node_use_nuage | bool or openshift_node_use_contiv | bool or openshift_node_sdn_network_plugin_name == 'cni' %}
+{% if openshift_node_use_openshift_sdn | bool or openshift_node_use_nuage | bool or openshift_node_use_contiv | bool or openshift_node_use_kuryr | bool or openshift_node_sdn_network_plugin_name == 'cni' %}
networkPluginName: {{ openshift_node_sdn_network_plugin_name }}
{% endif %}
{% if openshift.node.set_node_ip | bool %}
@@ -67,9 +67,11 @@ servingInfo:
{% endfor %}
{% endif %}
volumeDirectory: {{ openshift_node_data_dir }}/openshift.local.volumes
+{% if not (openshift_node_use_kuryr | default(False)) | bool %}
proxyArguments:
proxy-mode:
- {{ openshift.node.proxy_mode }}
+{% endif %}
volumeConfig:
localQuota:
perFSGroup: {{ openshift.node.local_quota_per_fsgroup }}
diff --git a/roles/openshift_node/templates/openshift.docker.node.service b/roles/openshift_node/templates/openshift.docker.node.service
index 310d8b29d..561aa01f4 100644
--- a/roles/openshift_node/templates/openshift.docker.node.service
+++ b/roles/openshift_node/templates/openshift.docker.node.service
@@ -6,6 +6,7 @@ PartOf={{ openshift.docker.service_name }}.service
Requires={{ openshift.docker.service_name }}.service
{% if openshift_node_use_openshift_sdn %}
Wants=openvswitch.service
+PartOf=openvswitch.service
After=ovsdb-server.service
After=ovs-vswitchd.service
{% endif %}
diff --git a/roles/openshift_node_certificates/handlers/main.yml b/roles/openshift_node_certificates/handlers/main.yml
index 4abe8bcaf..ef66bf9ca 100644
--- a/roles/openshift_node_certificates/handlers/main.yml
+++ b/roles/openshift_node_certificates/handlers/main.yml
@@ -2,9 +2,21 @@
- name: update ca trust
command: update-ca-trust
notify:
- - restart docker after updating ca trust
+ - check for container runtime after updating ca trust
-- name: restart docker after updating ca trust
+- name: check for container runtime after updating ca trust
+ command: >
+ systemctl -q is-active {{ openshift.docker.service_name }}.service
+ register: l_docker_installed
+ # An rc of 0 indicates that the container runtime service is
+ # running. We will restart it by notifying the restart handler since
+ # we have updated the system CA trust.
+ changed_when: l_docker_installed.rc == 0
+ failed_when: false
+ notify:
+ - restart container runtime after updating ca trust
+
+- name: restart container runtime after updating ca trust
systemd:
name: "{{ openshift.docker.service_name }}"
state: restarted
diff --git a/roles/openshift_node_dnsmasq/README.md b/roles/openshift_node_dnsmasq/README.md
new file mode 100644
index 000000000..4596190d7
--- /dev/null
+++ b/roles/openshift_node_dnsmasq/README.md
@@ -0,0 +1,27 @@
+OpenShift Node DNS resolver
+===========================
+
+Configure dnsmasq to act as a DNS resolver for an OpenShift node.
+
+Requirements
+------------
+
+Role Variables
+--------------
+
+From this role:
+
+| Name | Default value | Description |
+|-----------------------------------------------------|---------------|-----------------------------------------------------------------------------------|
+| openshift_node_dnsmasq_install_network_manager_hook | true | Install NetworkManager hook updating /etc/resolv.conf with local dnsmasq instance |
+
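+For example, to skip installing the NetworkManager dispatcher hook, set the
+variable in your inventory (a sketch; the `OSEv3:vars` group follows the
+usual openshift-ansible inventory layout):
+
+```
+[OSEv3:vars]
+openshift_node_dnsmasq_install_network_manager_hook=false
+```
+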
+Dependencies
+------------
+
+* openshift_common
+* openshift_node_facts
+
+License
+-------
+
+Apache License Version 2.0
diff --git a/roles/openshift_node_dnsmasq/defaults/main.yml b/roles/openshift_node_dnsmasq/defaults/main.yml
index ed97d539c..ebcff46b5 100644
--- a/roles/openshift_node_dnsmasq/defaults/main.yml
+++ b/roles/openshift_node_dnsmasq/defaults/main.yml
@@ -1 +1,7 @@
---
+openshift_node_dnsmasq_install_network_manager_hook: true
+
+# lo must always be present in this list or dnsmasq will conflict with
+# the node's dns service.
+openshift_node_dnsmasq_except_interfaces:
+- lo
diff --git a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
index df02bcf0e..f4e48b5b7 100755
--- a/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
+++ b/roles/openshift_node_dnsmasq/files/networkmanager/99-origin-dns.sh
@@ -54,6 +54,8 @@ domain-needed
server=/cluster.local/172.30.0.1
server=/30.172.in-addr.arpa/172.30.0.1
enable-dbus
+dns-forward-max=5000
+cache-size=5000
EOF
# New config file, must restart
NEEDS_RESTART=1
@@ -112,10 +114,10 @@ EOF
fi
sed -e '/^nameserver.*$/d' /etc/resolv.conf >> ${NEW_RESOLV_CONF}
echo "nameserver "${def_route_ip}"" >> ${NEW_RESOLV_CONF}
- if ! grep -q 'search.*cluster.local' ${NEW_RESOLV_CONF}; then
- sed -i '/^search/ s/$/ cluster.local/' ${NEW_RESOLV_CONF}
- elif ! grep -qw search ${NEW_RESOLV_CONF}; then
+ if ! grep -qw search ${NEW_RESOLV_CONF}; then
echo 'search cluster.local' >> ${NEW_RESOLV_CONF}
+ elif ! grep -q 'search.*cluster.local' ${NEW_RESOLV_CONF}; then
+ sed -i '/^search/ s/$/ cluster.local/' ${NEW_RESOLV_CONF}
fi
cp -Z ${NEW_RESOLV_CONF} /etc/resolv.conf
fi
diff --git a/roles/openshift_node_dnsmasq/tasks/network-manager.yml b/roles/openshift_node_dnsmasq/tasks/network-manager.yml
index dddcfc9da..e5a92a630 100644
--- a/roles/openshift_node_dnsmasq/tasks/network-manager.yml
+++ b/roles/openshift_node_dnsmasq/tasks/network-manager.yml
@@ -5,5 +5,6 @@
dest: /etc/NetworkManager/dispatcher.d/
mode: 0755
notify: restart NetworkManager
+ when: openshift_node_dnsmasq_install_network_manager_hook | default(true) | bool
- meta: flush_handlers
diff --git a/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2 b/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2
index ef3ba2880..6543c7c3e 100644
--- a/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2
+++ b/roles/openshift_node_dnsmasq/templates/origin-dns.conf.j2
@@ -3,5 +3,10 @@ domain-needed
no-negcache
max-cache-ttl=1
enable-dbus
-bind-interfaces
-listen-address={{ openshift.node.dns_ip }}
+dns-forward-max=5000
+cache-size=5000
+bind-dynamic
+{% for interface in openshift_node_dnsmasq_except_interfaces %}
+except-interface={{ interface }}
+{% endfor %}
+# End of config
diff --git a/roles/openshift_node_facts/tasks/main.yml b/roles/openshift_node_facts/tasks/main.yml
index 0d5fa664c..b45130400 100644
--- a/roles/openshift_node_facts/tasks/main.yml
+++ b/roles/openshift_node_facts/tasks/main.yml
@@ -11,7 +11,6 @@
- role: node
local_facts:
annotations: "{{ openshift_node_annotations | default(none) }}"
- debug_level: "{{ openshift_node_debug_level | default(openshift.common.debug_level) }}"
iptables_sync_period: "{{ openshift_node_iptables_sync_period | default(None) }}"
kubelet_args: "{{ openshift_node_kubelet_args | default(None) }}"
labels: "{{ openshift_node_labels | default(None) }}"
diff --git a/roles/openshift_node_upgrade/README.md b/roles/openshift_node_upgrade/README.md
index c7c0ff34a..73b98ad90 100644
--- a/roles/openshift_node_upgrade/README.md
+++ b/roles/openshift_node_upgrade/README.md
@@ -49,7 +49,6 @@ From openshift.node:
| Name | Default Value | |
|------------------------------------|---------------------|---------------------|
-| openshift.node.debug_level |---------------------|---------------------|
| openshift.node.node_image |---------------------|---------------------|
| openshift.node.ovs_image |---------------------|---------------------|
diff --git a/roles/openshift_node_upgrade/defaults/main.yml b/roles/openshift_node_upgrade/defaults/main.yml
index 6507b015d..10b4c6977 100644
--- a/roles/openshift_node_upgrade/defaults/main.yml
+++ b/roles/openshift_node_upgrade/defaults/main.yml
@@ -1,4 +1,6 @@
---
+openshift_node_debug_level: "{{ debug_level | default(2) }}"
+
openshift_use_openshift_sdn: True
os_sdn_network_plugin_name: "redhat/openshift-ovs-subnet"
diff --git a/roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml b/roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml
index 1186062eb..527580481 100644
--- a/roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml
+++ b/roles/openshift_node_upgrade/tasks/config/configure-node-settings.yml
@@ -7,7 +7,7 @@
create: true
with_items:
- regex: '^OPTIONS='
- line: "OPTIONS=--loglevel={{ openshift.node.debug_level | default(2) }}"
+ line: "OPTIONS=--loglevel={{ openshift_node_debug_level }}"
- regex: '^CONFIG_FILE='
line: "CONFIG_FILE={{ openshift.common.config_base }}/node/node-config.yaml"
- regex: '^IMAGE_VERSION='
diff --git a/roles/openshift_node_upgrade/tasks/registry_auth.yml b/roles/openshift_node_upgrade/tasks/registry_auth.yml
index de396fb4b..5e5e4f94a 100644
--- a/roles/openshift_node_upgrade/tasks/registry_auth.yml
+++ b/roles/openshift_node_upgrade/tasks/registry_auth.yml
@@ -11,6 +11,9 @@
- oreg_auth_user is defined
- (not node_oreg_auth_credentials_stat.stat.exists or oreg_auth_credentials_replace) | bool
register: node_oreg_auth_credentials_create
+ retries: 3
+ delay: 5
+ until: node_oreg_auth_credentials_create.rc == 0
notify:
- restart node
diff --git a/roles/openshift_node_upgrade/tasks/systemd_units.yml b/roles/openshift_node_upgrade/tasks/systemd_units.yml
index afff2f8ba..226f5290c 100644
--- a/roles/openshift_node_upgrade/tasks/systemd_units.yml
+++ b/roles/openshift_node_upgrade/tasks/systemd_units.yml
@@ -6,7 +6,7 @@
# - openshift.node.ovs_image
# - openshift_use_openshift_sdn
# - openshift.common.service_type
-# - openshift.node.debug_level
+# - openshift_node_debug_level
# - openshift.common.config_base
# - openshift.common.http_proxy
# - openshift.common.portal_net
diff --git a/roles/openshift_node_upgrade/templates/openshift.docker.node.service b/roles/openshift_node_upgrade/templates/openshift.docker.node.service
index 864e4b5d6..07d1ebc3c 100644
--- a/roles/openshift_node_upgrade/templates/openshift.docker.node.service
+++ b/roles/openshift_node_upgrade/templates/openshift.docker.node.service
@@ -6,6 +6,7 @@ PartOf={{ openshift.docker.service_name }}.service
Requires={{ openshift.docker.service_name }}.service
{% if openshift_use_openshift_sdn %}
Wants=openvswitch.service
+PartOf=openvswitch.service
After=ovsdb-server.service
After=ovs-vswitchd.service
{% endif %}
diff --git a/roles/openshift_prometheus/README.md b/roles/openshift_prometheus/README.md
index c5a44bffb..92f74928c 100644
--- a/roles/openshift_prometheus/README.md
+++ b/roles/openshift_prometheus/README.md
@@ -17,16 +17,16 @@ For default values, see [`defaults/main.yaml`](defaults/main.yaml).
- `openshift_prometheus_namespace`: project (i.e. namespace) where the components will be
deployed.
-- `openshift_prometheus_replicas`: The number of replicas for prometheus deployment.
-
- `openshift_prometheus_node_selector`: Selector for the nodes prometheus will be deployed on.
-- `openshift_prometheus_image_<COMPONENT>`: specify image for the component
+- `openshift_prometheus_<COMPONENT>_image_prefix`: specify image prefix for the component
+
+- `openshift_prometheus_<COMPONENT>_image_version`: specify image version for the component
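+
+For example (prefix and version values are illustrative; component-specific
+variables follow the same pattern, e.g. `openshift_prometheus_proxy_image_prefix`):
+```
+openshift_prometheus_image_prefix=registry.example.com/openshift/
+openshift_prometheus_image_version=v2.0.0-dev.3
+```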
-## Storage related variables
-Each prometheus component (prometheus, alertmanager, alert-buffer, oauth-proxy) can set pv claim by setting corresponding role variable:
+## PVC-related variables
+Each Prometheus component (prometheus, alertmanager, alertbuffer) can have its PV claim configured by setting the corresponding role variables:
```
-openshift_prometheus_<COMPONENT>_storage_type: <VALUE>
+openshift_prometheus_<COMPONENT>_storage_type: <VALUE> (pvc, emptydir)
openshift_prometheus_<COMPONENT>_pvc_(name|size|access_modes|pv_selector): <VALUE>
```
e.g
@@ -37,6 +37,29 @@ openshift_prometheus_alertbuffer_pvc_size: 10G
openshift_prometheus_pvc_access_modes: [ReadWriteOnce]
```
+## NFS PV Storage variables
+Each Prometheus component (prometheus, alertmanager, alertbuffer) can be backed by an NFS PV by setting the corresponding variables:
+```
+openshift_prometheus_<COMPONENT>_storage_kind=<VALUE>
+openshift_prometheus_<COMPONENT>_storage_(access_modes|host|labels)=<VALUE>
+openshift_prometheus_<COMPONENT>_storage_volume_(name|size)=<VALUE>
+openshift_prometheus_<COMPONENT>_storage_nfs_(directory|options)=<VALUE>
+```
+e.g.
+```
+openshift_prometheus_storage_kind=nfs
+openshift_prometheus_storage_access_modes=['ReadWriteOnce']
+openshift_prometheus_storage_host=nfs.example.com  # for an external host
+openshift_prometheus_storage_nfs_directory=/exports
+openshift_prometheus_storage_alertmanager_nfs_options='*(rw,root_squash)'
+openshift_prometheus_storage_volume_name=prometheus
+openshift_prometheus_storage_alertbuffer_volume_size=10Gi
+openshift_prometheus_storage_labels={'storage': 'prometheus'}
+```
+
+NOTE: Setting `openshift_prometheus_<COMPONENT>_storage_labels` overrides `openshift_prometheus_<COMPONENT>_pvc_pv_selector`
+
+
## Additional Alert Rules file variable
+An external file with alert rules can be added by setting the additional rules variable to the file's path:
```
diff --git a/roles/openshift_prometheus/defaults/main.yaml b/roles/openshift_prometheus/defaults/main.yaml
index 5aa8aecec..00995eee6 100644
--- a/roles/openshift_prometheus/defaults/main.yaml
+++ b/roles/openshift_prometheus/defaults/main.yaml
@@ -4,56 +4,42 @@ openshift_prometheus_state: present
openshift_prometheus_namespace: prometheus
-openshift_prometheus_replicas: 1
openshift_prometheus_node_selector: {"region":"infra"}
-# images
-openshift_prometheus_image_proxy: "openshift/oauth-proxy:v1.0.0"
-openshift_prometheus_image_prometheus: "openshift/prometheus:v2.0.0-dev"
-openshift_prometheus_image_alertmanager: "openshift/prometheus-alertmanager:dev"
-openshift_prometheus_image_alertbuffer: "openshift/prometheus-alert-buffer:v0.0.1"
+# image defaults
+openshift_prometheus_image_prefix: "openshift/"
+openshift_prometheus_image_version: "v2.0.0-dev.3"
+openshift_prometheus_proxy_image_prefix: "openshift/"
+openshift_prometheus_proxy_image_version: "v1.0.0"
+openshift_prometheus_alertmanager_image_prefix: "openshift/"
+openshift_prometheus_alertmanager_image_version: "v0.9.1"
+openshift_prometheus_alertbuffer_image_prefix: "openshift/"
+openshift_prometheus_alertbuffer_image_version: "v0.0.2"
# additional prometheus rules file
openshift_prometheus_additional_rules_file: null
-# All the required exports
-openshift_prometheus_pv_exports:
- - prometheus
- - prometheus-alertmanager
- - prometheus-alertbuffer
-# PV template files and their created object names
-openshift_prometheus_pv_data:
- - pv_name: prometheus
- pv_template: prom-pv-server.yml
- pv_label: Prometheus Server PV
- - pv_name: prometheus-alertmanager
- pv_template: prom-pv-alertmanager.yml
- pv_label: Prometheus Alertmanager PV
- - pv_name: prometheus-alertbuffer
- pv_template: prom-pv-alertbuffer.yml
- pv_label: Prometheus Alert Buffer PV
-
-# Hostname/IP of the NFS server. Currently defaults to first master
-openshift_prometheus_nfs_server: "{{ groups.nfs.0 }}"
-
# storage
-openshift_prometheus_storage_type: pvc
+# One of ['emptydir', 'pvc']
+openshift_prometheus_storage_type: "emptydir"
openshift_prometheus_pvc_name: prometheus
-openshift_prometheus_pvc_size: 10G
+openshift_prometheus_pvc_size: "{{ openshift_prometheus_storage_volume_size | default('10Gi') }}"
openshift_prometheus_pvc_access_modes: [ReadWriteOnce]
-openshift_prometheus_pvc_pv_selector: {}
+openshift_prometheus_pvc_pv_selector: "{{ openshift_prometheus_storage_labels | default({}) }}"
-openshift_prometheus_alertmanager_storage_type: pvc
+# One of ['emptydir', 'pvc']
+openshift_prometheus_alertmanager_storage_type: "emptydir"
openshift_prometheus_alertmanager_pvc_name: prometheus-alertmanager
-openshift_prometheus_alertmanager_pvc_size: 10G
+openshift_prometheus_alertmanager_pvc_size: "{{ openshift_prometheus_alertmanager_storage_volume_size | default('10Gi') }}"
openshift_prometheus_alertmanager_pvc_access_modes: [ReadWriteOnce]
-openshift_prometheus_alertmanager_pvc_pv_selector: {}
+openshift_prometheus_alertmanager_pvc_pv_selector: "{{ openshift_prometheus_alertmanager_storage_labels | default({}) }}"
-openshift_prometheus_alertbuffer_storage_type: pvc
+# One of ['emptydir', 'pvc']
+openshift_prometheus_alertbuffer_storage_type: "emptydir"
openshift_prometheus_alertbuffer_pvc_name: prometheus-alertbuffer
-openshift_prometheus_alertbuffer_pvc_size: 10G
+openshift_prometheus_alertbuffer_pvc_size: "{{ openshift_prometheus_alertbuffer_storage_volume_size | default('10Gi') }}"
openshift_prometheus_alertbuffer_pvc_access_modes: [ReadWriteOnce]
-openshift_prometheus_alertbuffer_pvc_pv_selector: {}
+openshift_prometheus_alertbuffer_pvc_pv_selector: "{{ openshift_prometheus_alertbuffer_storage_labels | default({}) }}"
# container resources
openshift_prometheus_cpu_limit: null
diff --git a/roles/openshift_prometheus/files/openshift_prometheus.exports b/roles/openshift_prometheus/files/openshift_prometheus.exports
deleted file mode 100644
index 3ccedb1fd..000000000
--- a/roles/openshift_prometheus/files/openshift_prometheus.exports
+++ /dev/null
@@ -1,3 +0,0 @@
-/exports/prometheus *(rw,no_root_squash,no_wdelay)
-/exports/prometheus-alertmanager *(rw,no_root_squash,no_wdelay)
-/exports/prometheus-alertbuffer *(rw,no_root_squash,no_wdelay)
diff --git a/roles/openshift_prometheus/tasks/create_pvs.yaml b/roles/openshift_prometheus/tasks/create_pvs.yaml
deleted file mode 100644
index 4e79da05f..000000000
--- a/roles/openshift_prometheus/tasks/create_pvs.yaml
+++ /dev/null
@@ -1,36 +0,0 @@
----
-# Check for existance and then conditionally:
-# - evaluate templates
-# - PVs
-#
-# These tasks idempotently create required Prometheus PV objects. Do not
-# call this file directly. This file is intended to be ran as an
-# include that has a 'with_items' attached to it. Hence the use below
-# of variables like "{{ item.pv_label }}"
-
-- name: "Check if the {{ item.pv_label }} template has been created already"
- oc_obj:
- namespace: "{{ openshift_prometheus_namespace }}"
- state: list
- kind: pv
- name: "{{ item.pv_name }}"
- register: prom_pv_check
-
-# Skip all of this if the PV already exists
-- block:
- - name: "Ensure the {{ item.pv_label }} template is evaluated"
- template:
- src: "{{ item.pv_template }}.j2"
- dest: "{{ tempdir }}/templates/{{ item.pv_template }}"
-
- - name: "Ensure {{ item.pv_label }} is created"
- oc_obj:
- namespace: "{{ openshift_prometheus_namespace }}"
- kind: pv
- name: "{{ item.pv_name }}"
- state: present
- delete_after: True
- files:
- - "{{ tempdir }}/templates/{{ item.pv_template }}"
- when:
- - not prom_pv_check.results.results.0
diff --git a/roles/openshift_prometheus/tasks/install_prometheus.yaml b/roles/openshift_prometheus/tasks/install_prometheus.yaml
index a9bce2fb1..00c3c1987 100644
--- a/roles/openshift_prometheus/tasks/install_prometheus.yaml
+++ b/roles/openshift_prometheus/tasks/install_prometheus.yaml
@@ -54,15 +54,6 @@
resource_name: cluster-reader
user: "system:serviceaccount:{{ openshift_prometheus_namespace }}:prometheus"
-
-######################################################################
-# NFS
-# In the case that we are not running on a cloud provider, volumes must be statically provisioned
-
-- include: nfs.yaml
- when: not (openshift_cloudprovider_kind is defined and (openshift_cloudprovider_kind == 'aws' or openshift_cloudprovider_kind == 'gce'))
-
-
# create prometheus and alerts services
# TODO join into 1 task with loop
- name: Create prometheus service
@@ -137,6 +128,7 @@
access_modes: "{{ openshift_prometheus_pvc_access_modes }}"
volume_capacity: "{{ openshift_prometheus_pvc_size }}"
selector: "{{ openshift_prometheus_pvc_pv_selector }}"
+ when: openshift_prometheus_storage_type == 'pvc'
- name: create alertmanager pvc
oc_pvc:
@@ -145,6 +137,7 @@
access_modes: "{{ openshift_prometheus_alertmanager_pvc_access_modes }}"
volume_capacity: "{{ openshift_prometheus_alertmanager_pvc_size }}"
selector: "{{ openshift_prometheus_alertmanager_pvc_pv_selector }}"
+ when: openshift_prometheus_alertmanager_storage_type == 'pvc'
- name: create alertbuffer pvc
oc_pvc:
@@ -153,22 +146,23 @@
access_modes: "{{ openshift_prometheus_alertbuffer_pvc_access_modes }}"
volume_capacity: "{{ openshift_prometheus_alertbuffer_pvc_size }}"
selector: "{{ openshift_prometheus_alertbuffer_pvc_pv_selector }}"
+ when: openshift_prometheus_alertbuffer_storage_type == 'pvc'
-# create prometheus deployment
-- name: Set prometheus deployment template
+# create prometheus stateful set
+- name: Set prometheus template
template:
- src: prometheus_deployment.j2
+ src: prometheus.j2
dest: "{{ tempdir }}/templates/prometheus.yaml"
vars:
namespace: "{{ openshift_prometheus_namespace }}"
- prom_replicas: "{{ openshift_prometheus_replicas }}"
+# prom_replicas: "{{ openshift_prometheus_replicas }}"
-- name: Set prometheus deployment
+- name: Set prometheus stateful set
oc_obj:
state: "{{ state }}"
name: "prometheus"
namespace: "{{ openshift_prometheus_namespace }}"
- kind: deployment
+ kind: statefulset
files:
- "{{ tempdir }}/templates/prometheus.yaml"
delete_after: true
diff --git a/roles/openshift_prometheus/tasks/nfs.yaml b/roles/openshift_prometheus/tasks/nfs.yaml
deleted file mode 100644
index 0b45f2cee..000000000
--- a/roles/openshift_prometheus/tasks/nfs.yaml
+++ /dev/null
@@ -1,44 +0,0 @@
----
-# Tasks to statically provision NFS volumes
-# Include if not using dynamic volume provisioning
-- name: Ensure the /exports/ directory exists
- file:
- path: /exports/
- state: directory
- mode: 0755
- owner: root
- group: root
-
-- name: Ensure the prom-pv0X export directories exist
- file:
- path: "/exports/{{ item }}"
- state: directory
- mode: 0777
- owner: nfsnobody
- group: nfsnobody
- with_items: "{{ openshift_prometheus_pv_exports }}"
-
-- name: Ensure the NFS exports for Prometheus PVs exist
- copy:
- src: openshift_prometheus.exports
- dest: /etc/exports.d/openshift_prometheus.exports
- register: nfs_exports_updated
-
-- name: Ensure the NFS export table is refreshed if exports were added
- command: exportfs -ar
- when:
- - nfs_exports_updated.changed
-
-
-######################################################################
-# Create the required Prometheus PVs. Check out these online docs if you
-# need a refresher on includes looping with items:
-# * http://docs.ansible.com/ansible/playbooks_loops.html#loops-and-includes-in-2-0
-# * http://stackoverflow.com/a/35128533
-#
-# TODO: Handle the case where a PV template is updated in
-# openshift-ansible and the change needs to be landed on the managed
-# cluster.
-
-- include: create_pvs.yaml
- with_items: "{{ openshift_prometheus_pv_data }}"
diff --git a/roles/openshift_prometheus/templates/prom-pv-alertbuffer.yml.j2 b/roles/openshift_prometheus/templates/prom-pv-alertbuffer.yml.j2
deleted file mode 100644
index 55a5e19c3..000000000
--- a/roles/openshift_prometheus/templates/prom-pv-alertbuffer.yml.j2
+++ /dev/null
@@ -1,15 +0,0 @@
-apiVersion: v1
-kind: PersistentVolume
-metadata:
- name: prometheus-alertbuffer
- labels:
- storage: prometheus-alertbuffer
-spec:
- capacity:
- storage: 15Gi
- accessModes:
- - ReadWriteOnce
- nfs:
- path: /exports/prometheus-alertbuffer
- server: {{ openshift_prometheus_nfs_server }}
- persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_prometheus/templates/prom-pv-alertmanager.yml.j2 b/roles/openshift_prometheus/templates/prom-pv-alertmanager.yml.j2
deleted file mode 100644
index 4ee518735..000000000
--- a/roles/openshift_prometheus/templates/prom-pv-alertmanager.yml.j2
+++ /dev/null
@@ -1,15 +0,0 @@
-apiVersion: v1
-kind: PersistentVolume
-metadata:
- name: prometheus-alertmanager
- labels:
- storage: prometheus-alertmanager
-spec:
- capacity:
- storage: 15Gi
- accessModes:
- - ReadWriteOnce
- nfs:
- path: /exports/prometheus-alertmanager
- server: {{ openshift_prometheus_nfs_server }}
- persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_prometheus/templates/prom-pv-server.yml.j2 b/roles/openshift_prometheus/templates/prom-pv-server.yml.j2
deleted file mode 100644
index 933bf0f60..000000000
--- a/roles/openshift_prometheus/templates/prom-pv-server.yml.j2
+++ /dev/null
@@ -1,15 +0,0 @@
-apiVersion: v1
-kind: PersistentVolume
-metadata:
- name: prometheus
- labels:
- storage: prometheus
-spec:
- capacity:
- storage: 15Gi
- accessModes:
- - ReadWriteOnce
- nfs:
- path: /exports/prometheus
- server: {{ openshift_prometheus_nfs_server }}
- persistentVolumeReclaimPolicy: Retain
diff --git a/roles/openshift_prometheus/templates/prometheus_deployment.j2 b/roles/openshift_prometheus/templates/prometheus.j2
index 98c117f19..916c57aa2 100644
--- a/roles/openshift_prometheus/templates/prometheus_deployment.j2
+++ b/roles/openshift_prometheus/templates/prometheus.j2
@@ -1,12 +1,14 @@
-apiVersion: extensions/v1beta1
-kind: Deployment
+apiVersion: apps/v1beta1
+kind: StatefulSet
metadata:
name: prometheus
namespace: {{ namespace }}
labels:
app: prometheus
spec:
- replicas: {{ prom_replicas|default(1) }}
+ updateStrategy:
+ type: RollingUpdate
+ podManagementPolicy: Parallel
selector:
provider: openshift
matchLabels:
@@ -27,7 +29,7 @@ spec:
containers:
# Deploy Prometheus behind an oauth proxy
- name: prom-proxy
- image: "{{ openshift_prometheus_image_proxy }}"
+ image: "{{openshift_prometheus_proxy_image_prefix}}oauth-proxy:{{openshift_prometheus_proxy_image_version}}"
imagePullPolicy: IfNotPresent
resources:
requests:
@@ -38,7 +40,7 @@ spec:
cpu: "{{openshift_prometheus_oauth_proxy_cpu_requests}}"
{% endif %}
limits:
-{% if openshift_prometheus_memory_requests_limit_proxy is defined and openshift_prometheus_oauth_proxy_memory_limit is not none %}
+{% if openshift_prometheus_oauth_proxy_memory_limit is defined and openshift_prometheus_oauth_proxy_memory_limit is not none %}
memory: "{{openshift_prometheus_oauth_proxy_memory_limit}}"
{% endif %}
{% if openshift_prometheus_oauth_proxy_cpu_limit is defined and openshift_prometheus_oauth_proxy_cpu_limit is not none %}
@@ -60,6 +62,8 @@ spec:
- -tls-key=/etc/tls/private/tls.key
- -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token
- -cookie-secret-file=/etc/proxy/secrets/session_secret
+ - -openshift-ca=/etc/pki/tls/cert.pem
+ - -openshift-ca=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
- -skip-auth-regex=^/metrics
volumeMounts:
- mountPath: /etc/tls/private
@@ -72,9 +76,10 @@ spec:
- name: prometheus
args:
- --storage.tsdb.retention=6h
+ - --storage.tsdb.min-block-duration=2m
- --config.file=/etc/prometheus/prometheus.yml
- --web.listen-address=localhost:9090
- image: "{{ openshift_prometheus_image_prometheus }}"
+ image: "{{openshift_prometheus_image_prefix}}prometheus:{{openshift_prometheus_image_version}}"
imagePullPolicy: IfNotPresent
resources:
requests:
@@ -100,7 +105,7 @@ spec:
# Deploy alertmanager behind prometheus-alert-buffer behind an oauth proxy
- name: alerts-proxy
- image: "{{ openshift_prometheus_image_proxy }}"
+ image: "{{openshift_prometheus_proxy_image_prefix}}oauth-proxy:{{openshift_prometheus_proxy_image_version}}"
imagePullPolicy: IfNotPresent
resources:
requests:
@@ -133,6 +138,8 @@ spec:
- -tls-key=/etc/tls/private/tls.key
- -client-secret-file=/var/run/secrets/kubernetes.io/serviceaccount/token
- -cookie-secret-file=/etc/proxy/secrets/session_secret
+ - -openshift-ca=/etc/pki/tls/cert.pem
+ - -openshift-ca=/var/run/secrets/kubernetes.io/serviceaccount/ca.crt
volumeMounts:
- mountPath: /etc/tls/private
name: alerts-tls
@@ -142,7 +149,7 @@ spec:
- name: alert-buffer
args:
- --storage-path=/alert-buffer/messages.db
- image: "{{ openshift_prometheus_image_alertbuffer }}"
+ image: "{{openshift_prometheus_alertbuffer_image_prefix}}prometheus-alert-buffer:{{openshift_prometheus_alertbuffer_image_version}}"
imagePullPolicy: IfNotPresent
resources:
requests:
@@ -169,7 +176,7 @@ spec:
- name: alertmanager
args:
- -config.file=/etc/alertmanager/alertmanager.yml
- image: "{{ openshift_prometheus_image_alertmanager }}"
+ image: "{{openshift_prometheus_alertmanager_image_prefix}}prometheus-alertmanager:{{openshift_prometheus_alertmanager_image_version}}"
imagePullPolicy: IfNotPresent
resources:
requests:
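
The Prometheus images above are now composed from `*_image_prefix`/`*_image_version` variable pairs instead of single full-image variables. A hypothetical inventory override, assuming these role variables keep the names shown in the template (prefix and version values are illustrative, not defaults):

```yaml
# Illustrative values only; consult the openshift_prometheus role defaults.
openshift_prometheus_image_prefix: "openshift/"
openshift_prometheus_image_version: "v2.0.0"
openshift_prometheus_proxy_image_prefix: "openshift/"
openshift_prometheus_proxy_image_version: "v1.0.0"
openshift_prometheus_alertbuffer_image_prefix: "openshift/"
openshift_prometheus_alertbuffer_image_version: "v0.0.2"
openshift_prometheus_alertmanager_image_prefix: "openshift/"
openshift_prometheus_alertmanager_image_version: "v0.9.1"
```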
diff --git a/roles/openshift_provisioners/tasks/generate_clusterrolebindings.yaml b/roles/openshift_provisioners/tasks/generate_clusterrolebindings.yaml
index ac21a5e37..1e6aafd00 100644
--- a/roles/openshift_provisioners/tasks/generate_clusterrolebindings.yaml
+++ b/roles/openshift_provisioners/tasks/generate_clusterrolebindings.yaml
@@ -1,6 +1,8 @@
---
- name: Generate ClusterRoleBindings
- template: src=clusterrolebinding.j2 dest={{mktemp.stdout}}/templates/{{obj_name}}-clusterrolebinding.yaml
+ template:
+ src: clusterrolebinding.j2
+ dest: "{{ mktemp.stdout }}/templates/{{ obj_name }}-clusterrolebinding.yaml"
vars:
acct_name: provisioners-{{item}}
obj_name: run-provisioners-{{item}}
diff --git a/roles/openshift_provisioners/tasks/generate_secrets.yaml b/roles/openshift_provisioners/tasks/generate_secrets.yaml
index e6cbb1bbf..fe5ff9f18 100644
--- a/roles/openshift_provisioners/tasks/generate_secrets.yaml
+++ b/roles/openshift_provisioners/tasks/generate_secrets.yaml
@@ -1,6 +1,8 @@
---
- name: Generate secret for efs
- template: src=secret.j2 dest={{mktemp.stdout}}/templates/{{obj_name}}-secret.yaml
+ template:
+ src: secret.j2
+ dest: "{{ mktemp.stdout }}/templates/{{ obj_name }}-secret.yaml"
vars:
name: efs
obj_name: "provisioners-efs"
diff --git a/roles/openshift_provisioners/tasks/generate_serviceaccounts.yaml b/roles/openshift_provisioners/tasks/generate_serviceaccounts.yaml
index 4fe0583ee..000f19994 100644
--- a/roles/openshift_provisioners/tasks/generate_serviceaccounts.yaml
+++ b/roles/openshift_provisioners/tasks/generate_serviceaccounts.yaml
@@ -1,6 +1,8 @@
---
- name: Generating serviceaccounts
- template: src=serviceaccount.j2 dest={{mktemp.stdout}}/templates/{{obj_name}}-sa.yaml
+ template:
+ src: serviceaccount.j2
+ dest: "{{ mktemp.stdout }}/templates/{{ obj_name }}-sa.yaml"
vars:
obj_name: provisioners-{{item}}
labels:
diff --git a/roles/openshift_provisioners/tasks/install_efs.yaml b/roles/openshift_provisioners/tasks/install_efs.yaml
index 4a6e00513..6e8792446 100644
--- a/roles/openshift_provisioners/tasks/install_efs.yaml
+++ b/roles/openshift_provisioners/tasks/install_efs.yaml
@@ -9,7 +9,9 @@
changed_when: no
- name: Generate efs PersistentVolumeClaim
- template: src=pvc.j2 dest={{mktemp.stdout}}/templates/{{obj_name}}-pvc.yaml
+ template:
+ src: pvc.j2
+ dest: "{{ mktemp.stdout }}/templates/{{ obj_name }}-pvc.yaml"
vars:
obj_name: "provisioners-efs"
size: "1Mi"
@@ -21,7 +23,9 @@
changed_when: no
- name: Generate efs PersistentVolume
- template: src=pv.j2 dest={{mktemp.stdout}}/templates/{{obj_name}}-pv.yaml
+ template:
+ src: pv.j2
+ dest: "{{ mktemp.stdout }}/templates/{{ obj_name }}-pv.yaml"
vars:
obj_name: "provisioners-efs"
size: "1Mi"
diff --git a/roles/openshift_provisioners/tasks/install_support.yaml b/roles/openshift_provisioners/tasks/install_support.yaml
index ba472f1c9..d6db81ab9 100644
--- a/roles/openshift_provisioners/tasks/install_support.yaml
+++ b/roles/openshift_provisioners/tasks/install_support.yaml
@@ -1,16 +1,9 @@
---
-- name: Check for provisioners project already exists
- command: >
- {{ openshift.common.client_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig get project {{openshift_provisioners_project}} --no-headers
- register: provisioners_project_result
- ignore_errors: yes
- when: not ansible_check_mode
- changed_when: no
-
-- name: Create provisioners project
- command: >
- {{ openshift.common.admin_binary }} --config={{ mktemp.stdout }}/admin.kubeconfig new-project {{openshift_provisioners_project}}
- when: not ansible_check_mode and "not found" in provisioners_project_result.stderr
+- name: Set provisioners project
+ oc_project:
+ state: present
+ kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
+ name: "{{ openshift_provisioners_project }}"
- name: Create temp directory for all our templates
file: path={{mktemp.stdout}}/templates state=directory mode=0755
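
The `oc_project` module above replaces the check-then-create shell pair with a single idempotent task. A minimal sketch of the matching teardown, following the `state: absent` usage that appears later in this patch for the service catalog namespace:

```yaml
- name: Remove provisioners project (illustrative)
  oc_project:
    state: absent
    kubeconfig: "{{ mktemp.stdout }}/admin.kubeconfig"
    name: "{{ openshift_provisioners_project }}"
```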
diff --git a/roles/openshift_provisioners/templates/pv.j2 b/roles/openshift_provisioners/templates/pv.j2
index f4128f9f0..f81b1617a 100644
--- a/roles/openshift_provisioners/templates/pv.j2
+++ b/roles/openshift_provisioners/templates/pv.j2
@@ -30,3 +30,4 @@ spec:
name: {{claim_name}}
namespace: {{openshift_provisioners_project}}
{% endif %}
+ storageClassName: ""
diff --git a/roles/openshift_provisioners/templates/pvc.j2 b/roles/openshift_provisioners/templates/pvc.j2
index 83d503056..0dd8772eb 100644
--- a/roles/openshift_provisioners/templates/pvc.j2
+++ b/roles/openshift_provisioners/templates/pvc.j2
@@ -23,4 +23,5 @@ spec:
resources:
requests:
storage: {{size}}
+ storageClassName: ""
diff --git a/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml b/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml
index e534e0cca..7c1573096 100644
--- a/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml
+++ b/roles/openshift_sanitize_inventory/tasks/__deprecations_logging.yml
@@ -21,16 +21,22 @@
openshift_logging_image_pull_secret: openshift_hosted_logging_image_pull_secret
openshift_logging_kibana_hostname: openshift_hosted_logging_hostname
openshift_logging_kibana_ops_hostname: openshift_hosted_logging_ops_hostname
+ openshift_logging_kibana_nodeselector: openshift_hosted_logging_kibana_nodeselector
+ openshift_logging_kibana_ops_nodeselector: openshift_hosted_logging_kibana_ops_nodeselector
openshift_logging_fluentd_journal_source: openshift_hosted_logging_journal_source
openshift_logging_fluentd_journal_read_from_head: openshift_hosted_logging_journal_read_from_head
+ openshift_logging_fluentd_nodeselector: openshift_hosted_logging_fluentd_nodeselector_label
openshift_logging_es_memory_limit: openshift_hosted_logging_elasticsearch_instance_ram
openshift_logging_es_nodeselector: openshift_hosted_logging_elasticsearch_nodeselector
+ openshift_logging_es_ops_nodeselector: openshift_hosted_logging_elasticsearch_ops_nodeselector
openshift_logging_es_ops_memory_limit: openshift_hosted_logging_elasticsearch_ops_instance_ram
openshift_logging_storage_access_modes: openshift_hosted_logging_storage_access_modes
openshift_logging_master_public_url: openshift_hosted_logging_master_public_url
openshift_logging_image_prefix: openshift_hosted_logging_deployer_prefix
openshift_logging_image_version: openshift_hosted_logging_deployer_version
openshift_logging_install_logging: openshift_hosted_logging_deploy
+ openshift_logging_curator_nodeselector: openshift_hosted_logging_curator_nodeselector
+ openshift_logging_curator_ops_nodeselector: openshift_hosted_logging_curator_ops_nodeselector
- set_fact:
@@ -40,9 +46,3 @@
openshift_logging_elasticsearch_ops_pvc_dynamic: "{{ 'true' if openshift_loggingops_storage_kind | default(none) == 'dynamic' else '' }}"
openshift_logging_elasticsearch_ops_pvc_size: "{{ openshift_loggingops_storage_volume_size | default('10Gi') if openshift_loggingops_storage_kind | default(none) in ['dynamic','nfs'] else '' }}"
openshift_logging_elasticsearch_ops_pvc_prefix: "{{ 'logging-es-ops' if openshift_loggingops_storage_kind | default(none) == 'dynamic' else '' }}"
- openshift_logging_curator_nodeselector: "{{ openshift_hosted_logging_curator_nodeselector | default('') | map_from_pairs }}"
- openshift_logging_curator_ops_nodeselector: "{{ openshift_hosted_logging_curator_ops_nodeselector | default('') | map_from_pairs }}"
- openshift_logging_kibana_nodeselector: "{{ openshift_hosted_logging_kibana_nodeselector | default('') | map_from_pairs }}"
- openshift_logging_kibana_ops_nodeselector: "{{ openshift_hosted_logging_kibana_ops_nodeselector | default('') | map_from_pairs }}"
- openshift_logging_fluentd_nodeselector: "{{ openshift_hosted_logging_fluentd_nodeselector_label | default('logging-infra-fluentd=true') | map_from_pairs }}"
- openshift_logging_es_ops_nodeselector: "{{ openshift_hosted_logging_elasticsearch_ops_nodeselector | default('') | map_from_pairs }}"
diff --git a/roles/openshift_sanitize_inventory/tasks/main.yml b/roles/openshift_sanitize_inventory/tasks/main.yml
index e327ee9f5..a6c168bc7 100644
--- a/roles/openshift_sanitize_inventory/tasks/main.yml
+++ b/roles/openshift_sanitize_inventory/tasks/main.yml
@@ -23,6 +23,8 @@
# TODO: once this is well-documented, add deprecation notice if using old name.
deployment_type: "{{ openshift_deployment_type | default(deployment_type) | default | string }}"
openshift_deployment_type: "{{ openshift_deployment_type | default(deployment_type) | default | string }}"
+ deployment_subtype: "{{ openshift_deployment_subtype | default(deployment_subtype) | default('basic') | string }}"
+ openshift_deployment_subtype: "{{ openshift_deployment_subtype | default(deployment_subtype) | default('basic') | string }}"
- name: Abort when deployment type is invalid
# this variable is required; complain early and clearly if it is invalid.
@@ -45,7 +47,7 @@
- name: Abort when openshift_release is invalid
when:
- openshift_release is defined
- - not openshift_release | match('\d+(\.\d+){1,3}$')
+ - not openshift_release | match('^\d+(\.\d+){1,3}$')
fail:
msg: |-
openshift_release is "{{ openshift_release }}" which is not a valid version string.
@@ -54,3 +56,16 @@
- include: unsupported.yml
when:
- not openshift_enable_unsupported_configurations | default(false) | bool
+
+- name: Ensure clusterid is set along with the cloudprovider
+ fail:
+ msg: >
+ Ensure that the openshift_clusterid is set and that all infrastructure has the required tags.
+
+ For dynamic provisioning when using multiple clusters in different zones, tag each node with Key=kubernetes.io/cluster/xxxx,Value=clusterid where xxxx and clusterid are unique per cluster. In versions prior to 3.6, this was Key=KubernetesCluster,Value=clusterid.
+
+ https://github.com/openshift/openshift-docs/blob/master/install_config/persistent_storage/dynamically_provisioning_pvs.adoc#available-dynamically-provisioned-plug-ins
+ when:
+ - openshift_clusterid is not defined
+ - openshift_cloudprovider_kind is defined
+ - openshift_cloudprovider_kind == 'aws'
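
A minimal inventory sketch that satisfies this new check, assuming an AWS cloud provider (the clusterid value is illustrative):

```yaml
# openshift_clusterid must match the kubernetes.io/cluster/<clusterid>
# tag applied to the cluster's AWS resources.
openshift_cloudprovider_kind: aws
openshift_clusterid: mycluster
```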
diff --git a/roles/openshift_sanitize_inventory/tasks/unsupported.yml b/roles/openshift_sanitize_inventory/tasks/unsupported.yml
index 39bf1780a..b70ab90a1 100644
--- a/roles/openshift_sanitize_inventory/tasks/unsupported.yml
+++ b/roles/openshift_sanitize_inventory/tasks/unsupported.yml
@@ -11,6 +11,14 @@
will not function. This also means that NetworkManager must be installed,
enabled, and responsible for management of the primary interface.
+- name: Ensure that openshift_node_dnsmasq_install_network_manager_hook is true
+ when:
+ - not openshift_node_dnsmasq_install_network_manager_hook | default(true) | bool
+ fail:
+ msg: |-
+ The NetworkManager hook is considered a critical part of the DNS
+ infrastructure.
+
- set_fact:
__using_dynamic: True
when:
diff --git a/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml b/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml
index 71e21a269..f449fba2b 100644
--- a/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml
+++ b/roles/openshift_service_catalog/files/kubeservicecatalog_roles_bindings.yml
@@ -1,25 +1,26 @@
apiVersion: v1
kind: Template
metadata:
- name: service-catalog
+ name: service-catalog-role-bindings
objects:
-- kind: ClusterRole
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRole
metadata:
name: servicecatalog-serviceclass-viewer
rules:
- apiGroups:
- servicecatalog.k8s.io
resources:
- - serviceclasses
+ - clusterserviceclasses
+ - clusterserviceplans
verbs:
- list
- watch
- get
-- kind: ClusterRoleBinding
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRoleBinding
metadata:
name: servicecatalog-serviceclass-viewer-binding
roleRef:
@@ -37,8 +38,8 @@ objects:
metadata:
name: service-catalog-apiserver
-- kind: ClusterRole
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRole
metadata:
name: sar-creator
rules:
@@ -49,17 +50,19 @@ objects:
verbs:
- create
-- kind: ClusterRoleBinding
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRoleBinding
metadata:
name: service-catalog-sar-creator-binding
roleRef:
name: sar-creator
- userNames:
- - system:serviceaccount:kube-service-catalog:service-catalog-apiserver
+ subjects:
+ - kind: ServiceAccount
+ name: service-catalog-apiserver
+ namespace: kube-service-catalog
-- kind: ClusterRole
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRole
metadata:
name: namespace-viewer
rules:
@@ -72,26 +75,30 @@ objects:
- watch
- get
-- kind: ClusterRoleBinding
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRoleBinding
metadata:
name: service-catalog-namespace-viewer-binding
roleRef:
name: namespace-viewer
- userNames:
- - system:serviceaccount:kube-service-catalog:service-catalog-apiserver
+ subjects:
+ - kind: ServiceAccount
+ name: service-catalog-apiserver
+ namespace: kube-service-catalog
-- kind: ClusterRoleBinding
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRoleBinding
metadata:
name: service-catalog-controller-namespace-viewer-binding
roleRef:
name: namespace-viewer
- userNames:
- - system:serviceaccount:kube-service-catalog:service-catalog-controller
+ subjects:
+ - kind: ServiceAccount
+ name: service-catalog-controller
+ namespace: kube-service-catalog
-- kind: ClusterRole
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRole
metadata:
name: service-catalog-controller
rules:
@@ -102,6 +109,7 @@ objects:
verbs:
- create
- update
+ - patch
- delete
- get
- list
@@ -109,19 +117,22 @@ objects:
- apiGroups:
- servicecatalog.k8s.io
resources:
- - brokers/status
- - instances/status
- - bindings/status
+ - clusterservicebrokers/status
+ - serviceinstances/status
+ - servicebindings/status
+ - servicebindings/finalizers
+ - serviceinstances/reference
verbs:
- update
- apiGroups:
- servicecatalog.k8s.io
resources:
- - brokers
- - instances
- - bindings
+ - clusterservicebrokers
+ - serviceinstances
+ - servicebindings
verbs:
- list
+ - get
- watch
- apiGroups:
- ""
@@ -133,7 +144,8 @@ objects:
- apiGroups:
- servicecatalog.k8s.io
resources:
- - serviceclasses
+ - clusterserviceclasses
+ - clusterserviceplans
verbs:
- create
- delete
@@ -154,17 +166,19 @@ objects:
- list
- watch
-- kind: ClusterRoleBinding
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRoleBinding
metadata:
name: service-catalog-controller-binding
roleRef:
name: service-catalog-controller
- userNames:
- - system:serviceaccount:kube-service-catalog:service-catalog-controller
-
-- kind: Role
- apiVersion: v1
+ subjects:
+ - kind: ServiceAccount
+ name: service-catalog-controller
+ namespace: kube-service-catalog
+
+- apiVersion: authorization.openshift.io/v1
+ kind: Role
metadata:
name: endpoint-accessor
rules:
@@ -179,21 +193,25 @@ objects:
- create
- update
-- kind: RoleBinding
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: RoleBinding
metadata:
- name: endpoint-accessor-binding
+ name: endpoint-accessor-binding
roleRef:
name: endpoint-accessor
namespace: kube-service-catalog
- userNames:
- - system:serviceaccount:kube-service-catalog:service-catalog-controller
+ subjects:
+ - kind: ServiceAccount
+ namespace: kube-service-catalog
+ name: service-catalog-controller
-- kind: ClusterRoleBinding
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: ClusterRoleBinding
metadata:
name: system:auth-delegator-binding
roleRef:
name: system:auth-delegator
- userNames:
- - system:serviceaccount:kube-service-catalog:service-catalog-apiserver
+ subjects:
+ - kind: ServiceAccount
+ name: service-catalog-apiserver
+ namespace: kube-service-catalog
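
For context on the binding rewrites above: the legacy `userNames` form identifies a service account with the flattened string `system:serviceaccount:<namespace>:<name>`, while the `subjects` form names the same account structurally. The two spellings below are equivalent:

```yaml
# Legacy form (removed):
# userNames:
# - system:serviceaccount:kube-service-catalog:service-catalog-controller
# Structured form (added):
subjects:
- kind: ServiceAccount
  name: service-catalog-controller
  namespace: kube-service-catalog
```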
diff --git a/roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml b/roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml
index f6ee0955d..f563ae42e 100644
--- a/roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml
+++ b/roles/openshift_service_catalog/files/kubesystem_roles_bindings.yml
@@ -1,11 +1,11 @@
apiVersion: v1
kind: Template
metadata:
- name: kube-system-service-catalog
+ name: kube-system-service-catalog-role-bindings
objects:
-- kind: Role
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: Role
metadata:
name: extension-apiserver-authentication-reader
namespace: ${KUBE_SYSTEM_NAMESPACE}
@@ -19,16 +19,18 @@ objects:
verbs:
- get
-- kind: RoleBinding
- apiVersion: v1
+- apiVersion: authorization.openshift.io/v1
+ kind: RoleBinding
metadata:
name: extension-apiserver-authentication-reader-binding
namespace: ${KUBE_SYSTEM_NAMESPACE}
roleRef:
name: extension-apiserver-authentication-reader
- namespace: kube-system
- userNames:
- - system:serviceaccount:kube-service-catalog:service-catalog-apiserver
+ namespace: ${KUBE_SYSTEM_NAMESPACE}
+ subjects:
+ - kind: ServiceAccount
+ name: service-catalog-apiserver
+ namespace: kube-service-catalog
parameters:
- description: Do not change this value.
diff --git a/roles/openshift_service_catalog/tasks/generate_certs.yml b/roles/openshift_service_catalog/tasks/generate_certs.yml
index cc897b032..9d55185c8 100644
--- a/roles/openshift_service_catalog/tasks/generate_certs.yml
+++ b/roles/openshift_service_catalog/tasks/generate_certs.yml
@@ -16,6 +16,16 @@
--key={{ generated_certs_dir }}/ca.key --cert={{ generated_certs_dir }}/ca.crt
--serial={{ generated_certs_dir }}/apiserver.serial.txt --name=service-catalog-signer
+- name: Delete old apiserver.crt
+ file:
+ path: "{{ generated_certs_dir }}/apiserver.crt"
+ state: absent
+
+- name: Delete old apiserver.key
+ file:
+ path: "{{ generated_certs_dir }}/apiserver.key"
+ state: absent
+
- name: Generating server keys
oc_adm_ca_server_cert:
cert: "{{ generated_certs_dir }}/apiserver.crt"
@@ -36,19 +46,28 @@
- name: tls.key
path: "{{ generated_certs_dir }}/apiserver.key"
+- name: Create service-catalog-ssl secret
+ oc_secret:
+ state: present
+ name: service-catalog-ssl
+ namespace: kube-service-catalog
+ files:
+ - name: tls.crt
+ path: "{{ generated_certs_dir }}/apiserver.crt"
+
- slurp:
src: "{{ generated_certs_dir }}/ca.crt"
register: apiserver_ca
- shell: >
- oc get apiservices.apiregistration.k8s.io/v1alpha1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found"
+ oc get apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io -n kube-service-catalog || echo "not found"
register: get_apiservices
changed_when: no
- name: Create api service
oc_obj:
state: present
- name: v1alpha1.servicecatalog.k8s.io
+ name: v1beta1.servicecatalog.k8s.io
kind: apiservices.apiregistration.k8s.io
namespace: "kube-service-catalog"
content:
@@ -57,10 +76,10 @@
apiVersion: apiregistration.k8s.io/v1beta1
kind: APIService
metadata:
- name: v1alpha1.servicecatalog.k8s.io
+ name: v1beta1.servicecatalog.k8s.io
spec:
group: servicecatalog.k8s.io
- version: v1alpha1
+ version: v1beta1
service:
namespace: "kube-service-catalog"
name: apiserver
diff --git a/roles/openshift_service_catalog/tasks/install.yml b/roles/openshift_service_catalog/tasks/install.yml
index e202ae173..d17468b5c 100644
--- a/roles/openshift_service_catalog/tasks/install.yml
+++ b/roles/openshift_service_catalog/tasks/install.yml
@@ -47,16 +47,15 @@
dest: "{{ mktemp.stdout }}/kubeservicecatalog_roles_bindings.yml"
- oc_obj:
- name: service-catalog
+ name: service-catalog-role-bindings
kind: template
namespace: "kube-service-catalog"
files:
- "{{ mktemp.stdout }}/kubeservicecatalog_roles_bindings.yml"
- delete_after: yes
- oc_process:
create: True
- template_name: service-catalog
+ template_name: service-catalog-role-bindings
namespace: "kube-service-catalog"
- copy:
@@ -64,16 +63,15 @@
dest: "{{ mktemp.stdout }}/kubesystem_roles_bindings.yml"
- oc_obj:
- name: kube-system-service-catalog
+ name: kube-system-service-catalog-role-bindings
kind: template
namespace: kube-system
files:
- "{{ mktemp.stdout }}/kubesystem_roles_bindings.yml"
- delete_after: yes
- oc_process:
create: True
- template_name: kube-system-service-catalog
+ template_name: kube-system-service-catalog-role-bindings
namespace: kube-system
- oc_obj:
@@ -85,19 +83,19 @@
# only do this if we don't already have the updated role info
- name: Generate apply template for clusterrole/edit
template:
- src: sc_role_patching.j2
+ src: sc_admin_edit_role_patching.j2
dest: "{{ mktemp.stdout }}/edit_sc_patch.yml"
vars:
original_content: "{{ edit_yaml.results.results[0] | to_yaml }}"
when:
- - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['instances', 'bindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+ - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
# only do this if we don't already have the updated role info
- name: update edit role for service catalog and pod preset access
command: >
oc replace -f {{ mktemp.stdout }}/edit_sc_patch.yml
when:
- - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['instances', 'bindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+ - not edit_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not edit_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
- oc_obj:
name: admin
@@ -108,19 +106,42 @@
# only do this if we don't already have the updated role info
- name: Generate apply template for clusterrole/admin
template:
- src: sc_role_patching.j2
+ src: sc_admin_edit_role_patching.j2
dest: "{{ mktemp.stdout }}/admin_sc_patch.yml"
vars:
original_content: "{{ admin_yaml.results.results[0] | to_yaml }}"
when:
- - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['instances', 'bindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+ - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
# only do this if we don't already have the updated role info
- name: update admin role for service catalog and pod preset access
command: >
oc replace -f {{ mktemp.stdout }}/admin_sc_patch.yml
when:
- - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['instances', 'bindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+ - not admin_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['create', 'update', 'delete', 'get', 'list', 'watch']) or not admin_yaml.results.results[0] | oo_contains_rule(['settings.k8s.io'], ['podpresets'], ['create', 'update', 'delete', 'get', 'list', 'watch'])
+
+- oc_obj:
+ name: view
+ kind: clusterrole
+ state: list
+ register: view_yaml
+
+# only do this if we don't already have the updated role info
+- name: Generate apply template for clusterrole/view
+ template:
+ src: sc_view_role_patching.j2
+ dest: "{{ mktemp.stdout }}/view_sc_patch.yml"
+ vars:
+ original_content: "{{ view_yaml.results.results[0] | to_yaml }}"
+ when:
+ - not view_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['get', 'list', 'watch'])
+
+# only do this if we don't already have the updated role info
+- name: update view role for service catalog access
+ command: >
+ oc replace -f {{ mktemp.stdout }}/view_sc_patch.yml
+ when:
+ - not view_yaml.results.results[0] | oo_contains_rule(['servicecatalog.k8s.io'], ['serviceinstances', 'servicebindings'], ['get', 'list', 'watch'])
- oc_adm_policy_user:
namespace: kube-service-catalog
diff --git a/roles/openshift_service_catalog/tasks/remove.yml b/roles/openshift_service_catalog/tasks/remove.yml
index 2fb1ec440..ca9844e79 100644
--- a/roles/openshift_service_catalog/tasks/remove.yml
+++ b/roles/openshift_service_catalog/tasks/remove.yml
@@ -1,11 +1,7 @@
---
- name: Remove Service Catalog APIServer
command: >
- oc delete apiservices.apiregistration.k8s.io/v1alpha1.servicecatalog.k8s.io --ignore-not-found -n kube-service-catalog
-
-- name: Remove Policy Binding
- command: >
- oc delete policybindings/kube-system:default -n kube-system --ignore-not-found
+ oc delete apiservices.apiregistration.k8s.io/v1beta1.servicecatalog.k8s.io --ignore-not-found -n kube-service-catalog
# TODO: this module doesn't currently remove this
#- name: Remove service catalog api service
@@ -13,7 +9,7 @@
# state: absent
# namespace: "kube-service-catalog"
# kind: apiservices.apiregistration.k8s.io
-# name: v1alpha1.servicecatalog.k8s.io
+# name: v1beta1.servicecatalog.k8s.io
- name: Remove Service Catalog API Server route
oc_obj:
@@ -50,6 +46,26 @@
kind: deployment
name: controller-manager
+- name: Remove Service Catalog kube-system Role Bindings
+ shell: >
+ oc process kube-system-service-catalog-role-bindings -n kube-system | oc delete --ignore-not-found -f -
+
+- oc_obj:
+ kind: template
+ name: "kube-system-service-catalog-role-bindings"
+ namespace: kube-system
+ state: absent
+
+- name: Remove Service Catalog kube-service-catalog Role Bindings
+ shell: >
+ oc process service-catalog-role-bindings -n kube-service-catalog | oc delete --ignore-not-found -f -
+
+- oc_obj:
+ kind: template
+ name: "service-catalog-role-bindings"
+ namespace: kube-service-catalog
+ state: absent
+
- name: Remove Service Catalog namespace
oc_project:
state: absent
diff --git a/roles/openshift_service_catalog/templates/api_server.j2 b/roles/openshift_service_catalog/templates/api_server.j2
index c09834fd4..0e5bb7230 100644
--- a/roles/openshift_service_catalog/templates/api_server.j2
+++ b/roles/openshift_service_catalog/templates/api_server.j2
@@ -24,6 +24,7 @@ spec:
{% endfor %}
containers:
- args:
+ - apiserver
- --storage-type
- etcd
- --secure-port
@@ -41,9 +42,11 @@ spec:
- --cors-allowed-origins
- {{ cors_allowed_origin }}
- --admission-control
- - "KubernetesNamespaceLifecycle"
+ - KubernetesNamespaceLifecycle,DefaultServicePlan,ServiceBindingsLifecycle,ServicePlanChangeValidator,BrokerAuthSarCheck
+ - --feature-gates
+ - OriginatingIdentity=true
image: {{ openshift_service_catalog_image_prefix }}service-catalog:{{ openshift_service_catalog_image_version }}
- command: ["/usr/bin/apiserver"]
+ command: ["/usr/bin/service-catalog"]
imagePullPolicy: Always
name: apiserver
ports:
diff --git a/roles/openshift_service_catalog/templates/controller_manager.j2 b/roles/openshift_service_catalog/templates/controller_manager.j2
index 1bbc0fa2c..e5e5f6b50 100644
--- a/roles/openshift_service_catalog/templates/controller_manager.j2
+++ b/roles/openshift_service_catalog/templates/controller_manager.j2
@@ -29,11 +29,17 @@ spec:
fieldRef:
fieldPath: metadata.namespace
args:
+ - controller-manager
- -v
- "5"
- - "--leader-election-namespace=$(K8S_NAMESPACE)"
+ - --leader-election-namespace
+ - kube-service-catalog
+ - --broker-relist-interval
+ - "5m"
+ - --feature-gates
+ - OriginatingIdentity=true
image: {{ openshift_service_catalog_image_prefix }}service-catalog:{{ openshift_service_catalog_image_version }}
- command: ["/usr/bin/controller-manager"]
+ command: ["/usr/bin/service-catalog"]
imagePullPolicy: Always
name: controller-manager
ports:
@@ -41,7 +47,19 @@ spec:
protocol: TCP
resources: {}
terminationMessagePath: /dev/termination-log
+ volumeMounts:
+ - mountPath: /var/run/kubernetes-service-catalog
+ name: service-catalog-ssl
+ readOnly: true
dnsPolicy: ClusterFirst
restartPolicy: Always
securityContext: {}
terminationGracePeriodSeconds: 30
+ volumes:
+ - name: service-catalog-ssl
+ secret:
+ defaultMode: 420
+ items:
+ - key: tls.crt
+ path: apiserver.crt
+ secretName: apiserver-ssl
diff --git a/roles/openshift_service_catalog/templates/sc_role_patching.j2 b/roles/openshift_service_catalog/templates/sc_admin_edit_role_patching.j2
index 69b062b3f..4629d5bb3 100644
--- a/roles/openshift_service_catalog/templates/sc_role_patching.j2
+++ b/roles/openshift_service_catalog/templates/sc_admin_edit_role_patching.j2
@@ -3,8 +3,8 @@
- "servicecatalog.k8s.io"
attributeRestrictions: null
resources:
- - instances
- - bindings
+ - serviceinstances
+ - servicebindings
verbs:
- create
- update
diff --git a/roles/openshift_service_catalog/templates/sc_view_role_patching.j2 b/roles/openshift_service_catalog/templates/sc_view_role_patching.j2
new file mode 100644
index 000000000..838993854
--- /dev/null
+++ b/roles/openshift_service_catalog/templates/sc_view_role_patching.j2
@@ -0,0 +1,11 @@
+{{ original_content }}
+- apiGroups:
+ - "servicecatalog.k8s.io"
+ attributeRestrictions: null
+ resources:
+ - serviceinstances
+ - servicebindings
+ verbs:
+ - get
+ - list
+ - watch
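
Rendered against the slurped clusterrole, this template emits the original role document with the read-only service catalog rule appended; the `oc replace` task in install.yml then applies it. A sketch of the rendered output (pre-existing rules elided):

```yaml
apiVersion: v1
kind: ClusterRole
metadata:
  name: view
rules:
# ...pre-existing rules carried over from original_content...
- apiGroups:
  - servicecatalog.k8s.io
  attributeRestrictions: null
  resources:
  - serviceinstances
  - servicebindings
  verbs:
  - get
  - list
  - watch
```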
diff --git a/roles/openshift_service_catalog/vars/openshift-enterprise.yml b/roles/openshift_service_catalog/vars/openshift-enterprise.yml
index 4df60e9a8..cab9cc7d8 100644
--- a/roles/openshift_service_catalog/vars/openshift-enterprise.yml
+++ b/roles/openshift_service_catalog/vars/openshift-enterprise.yml
@@ -1,3 +1,3 @@
---
__openshift_service_catalog_image_prefix: "registry.access.redhat.com/openshift3/ose-"
-__openshift_service_catalog_image_version: "v3.6"
+__openshift_service_catalog_image_version: "v3.7"
diff --git a/roles/openshift_storage_glusterfs/README.md b/roles/openshift_storage_glusterfs/README.md
index d0bc0e028..abe411f67 100644
--- a/roles/openshift_storage_glusterfs/README.md
+++ b/roles/openshift_storage_glusterfs/README.md
@@ -119,13 +119,13 @@ are an exception:
Additionally, this role's behavior responds to the following registry-specific
variables:
-| Name | Default value | Description |
-|-----------------------------------------------|------------------------------|-----------------------------------------|
-| openshift_hosted_registry_glusterfs_endpoints | glusterfs-registry-endpoints | The name for the Endpoints resource that will point the registry to the GlusterFS nodes
-| openshift_hosted_registry_glusterfs_path | glusterfs-registry-volume | The name for the GlusterFS volume that will provide registry storage
-| openshift_hosted_registry_glusterfs_readonly | False | Whether the GlusterFS volume should be read-only
-| openshift_hosted_registry_glusterfs_swap | False | Whether to swap an existing registry's storage volume for a GlusterFS volume
-| openshift_hosted_registry_glusterfs_swapcopy | True | If swapping, copy the contents of the pre-existing registry storage to the new GlusterFS volume
+| Name | Default value | Description |
+|-------------------------------------------------------|------------------------------|-----------------------------------------|
+| openshift_hosted_registry_storage_glusterfs_endpoints | glusterfs-registry-endpoints | The name for the Endpoints resource that will point the registry to the GlusterFS nodes
+| openshift_hosted_registry_storage_glusterfs_path | glusterfs-registry-volume | The name for the GlusterFS volume that will provide registry storage
+| openshift_hosted_registry_storage_glusterfs_readonly | False | Whether the GlusterFS volume should be read-only
+| openshift_hosted_registry_storage_glusterfs_swap | False | Whether to swap an existing registry's storage volume for a GlusterFS volume
+| openshift_hosted_registry_storage_glusterfs_swapcopy | True | If swapping, copy the contents of the pre-existing registry storage to the new GlusterFS volume
Dependencies
------------
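
A hypothetical inventory excerpt using the renamed registry variables (values illustrative; the first two match the documented defaults):

```yaml
openshift_hosted_registry_storage_glusterfs_endpoints: glusterfs-registry-endpoints
openshift_hosted_registry_storage_glusterfs_path: glusterfs-registry-volume
openshift_hosted_registry_storage_glusterfs_readonly: False
openshift_hosted_registry_storage_glusterfs_swap: True
openshift_hosted_registry_storage_glusterfs_swapcopy: True
```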
diff --git a/roles/openshift_storage_glusterfs/files/v1.5/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v1.5/deploy-heketi-template.yml
new file mode 100644
index 000000000..7b705c2d4
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v1.5/deploy-heketi-template.yml
@@ -0,0 +1,135 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: heketi-template
+ deploy-heketi: support
+ annotations:
+ description: Bootstrap Heketi installation
+ tags: glusterfs,heketi,installation
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-service
+ deploy-heketi: support
+ annotations:
+ description: Exposes Heketi service
+ spec:
+ ports:
+ - name: deploy-heketi-${CLUSTER_NAME}
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: ${HEKETI_ROUTE}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-route
+ deploy-heketi: support
+ spec:
+ to:
+ kind: Service
+ name: deploy-heketi-${CLUSTER_NAME}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: deploy-heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-dc
+ deploy-heketi: support
+ annotations:
+ description: Defines how to deploy Heketi
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: deploy-heketi
+ labels:
+ glusterfs: deploy-heketi-${CLUSTER_NAME}-pod
+ deploy-heketi: support
+ spec:
+ serviceAccountName: heketi-${CLUSTER_NAME}-service-account
+ containers:
+ - name: heketi
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ env:
+ - name: HEKETI_USER_KEY
+ value: ${HEKETI_USER_KEY}
+ - name: HEKETI_ADMIN_KEY
+ value: ${HEKETI_ADMIN_KEY}
+ - name: HEKETI_EXECUTOR
+ value: ${HEKETI_EXECUTOR}
+ - name: HEKETI_FSTAB
+ value: /var/lib/heketi/fstab
+ - name: HEKETI_SNAPSHOT_LIMIT
+ value: '14'
+ - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+ value: '1'
+ - name: HEKETI_KUBE_NAMESPACE
+ value: ${HEKETI_KUBE_NAMESPACE}
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/heketi
+ - name: config
+ mountPath: /etc/heketi
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 3
+ httpGet:
+ path: /hello
+ port: 8080
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 30
+ httpGet:
+ path: /hello
+ port: 8080
+ volumes:
+ - name: db
+ - name: config
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-config-secret
+parameters:
+- name: HEKETI_USER_KEY
+ displayName: Heketi User Secret
+ description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+ displayName: Heketi Administrator Secret
+ description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_EXECUTOR
+ displayName: heketi executor type
+ description: Set the executor type, kubernetes or ssh
+ value: kubernetes
+- name: HEKETI_KUBE_NAMESPACE
+ displayName: Namespace
+ description: Set the namespace where the GlusterFS pods reside
+ value: default
+- name: HEKETI_ROUTE
+ displayName: heketi route name
+ description: Set the hostname for the route URL
+ value: "heketi-glusterfs"
+- name: IMAGE_NAME
+ displayName: heketi container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: heketi container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify this heketi service, useful for running multiple heketi instances
+ value: glusterfs
diff --git a/roles/openshift_storage_glusterfs/files/v1.5/glusterfs-template.yml b/roles/openshift_storage_glusterfs/files/v1.5/glusterfs-template.yml
new file mode 100644
index 000000000..8c5e1ded3
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v1.5/glusterfs-template.yml
@@ -0,0 +1,136 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: glusterfs
+ labels:
+ glusterfs: template
+ annotations:
+ description: GlusterFS DaemonSet template
+ tags: glusterfs
+objects:
+- kind: DaemonSet
+ apiVersion: extensions/v1beta1
+ metadata:
+ name: glusterfs-${CLUSTER_NAME}
+ labels:
+ glusterfs: ${CLUSTER_NAME}-daemonset
+ annotations:
+ description: GlusterFS DaemonSet
+ tags: glusterfs
+ spec:
+ selector:
+ matchLabels:
+ glusterfs: ${CLUSTER_NAME}-pod
+ template:
+ metadata:
+ name: glusterfs-${CLUSTER_NAME}
+ labels:
+ glusterfs: ${CLUSTER_NAME}-pod
+ glusterfs-node: pod
+ spec:
+ nodeSelector: "${{NODE_LABELS}}"
+ hostNetwork: true
+ containers:
+ - name: glusterfs
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ volumeMounts:
+ - name: glusterfs-heketi
+ mountPath: "/var/lib/heketi"
+ - name: glusterfs-run
+ mountPath: "/run"
+ - name: glusterfs-lvm
+ mountPath: "/run/lvm"
+ - name: glusterfs-etc
+ mountPath: "/etc/glusterfs"
+ - name: glusterfs-logs
+ mountPath: "/var/log/glusterfs"
+ - name: glusterfs-config
+ mountPath: "/var/lib/glusterd"
+ - name: glusterfs-dev
+ mountPath: "/dev"
+ - name: glusterfs-misc
+ mountPath: "/var/lib/misc/glusterfsd"
+ - name: glusterfs-cgroup
+ mountPath: "/sys/fs/cgroup"
+ readOnly: true
+ - name: glusterfs-ssl
+ mountPath: "/etc/ssl"
+ readOnly: true
+ securityContext:
+ capabilities: {}
+ privileged: true
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 40
+ exec:
+ command:
+ - "/bin/bash"
+ - "-c"
+ - systemctl status glusterd.service
+ periodSeconds: 25
+ successThreshold: 1
+ failureThreshold: 15
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 40
+ exec:
+ command:
+ - "/bin/bash"
+ - "-c"
+ - systemctl status glusterd.service
+ periodSeconds: 25
+ successThreshold: 1
+ failureThreshold: 15
+ resources: {}
+ terminationMessagePath: "/dev/termination-log"
+ volumes:
+ - name: glusterfs-heketi
+ hostPath:
+ path: "/var/lib/heketi"
+ - name: glusterfs-run
+ emptyDir: {}
+ - name: glusterfs-lvm
+ hostPath:
+ path: "/run/lvm"
+ - name: glusterfs-etc
+ hostPath:
+ path: "/etc/glusterfs"
+ - name: glusterfs-logs
+ hostPath:
+ path: "/var/log/glusterfs"
+ - name: glusterfs-config
+ hostPath:
+ path: "/var/lib/glusterd"
+ - name: glusterfs-dev
+ hostPath:
+ path: "/dev"
+ - name: glusterfs-misc
+ hostPath:
+ path: "/var/lib/misc/glusterfsd"
+ - name: glusterfs-cgroup
+ hostPath:
+ path: "/sys/fs/cgroup"
+ - name: glusterfs-ssl
+ hostPath:
+ path: "/etc/ssl"
+ restartPolicy: Always
+ terminationGracePeriodSeconds: 30
+ dnsPolicy: ClusterFirst
+ securityContext: {}
+parameters:
+- name: NODE_LABELS
+ displayName: Daemonset Node Labels
+ description: Labels which define the daemonset node selector. Must contain at least one label of the format 'glusterfs=<CLUSTER_NAME>-host'
+ value: '{ "glusterfs": "storage-host" }'
+- name: IMAGE_NAME
+ displayName: GlusterFS container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: GlusterFS container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify which heketi service manages this cluster, useful for running multiple heketi instances
+ value: storage
diff --git a/roles/openshift_storage_glusterfs/files/v1.5/heketi-template.yml b/roles/openshift_storage_glusterfs/files/v1.5/heketi-template.yml
new file mode 100644
index 000000000..61b6a8c13
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/files/v1.5/heketi-template.yml
@@ -0,0 +1,134 @@
+---
+kind: Template
+apiVersion: v1
+metadata:
+ name: heketi
+ labels:
+ glusterfs: heketi-template
+ annotations:
+ description: Heketi service deployment template
+ tags: glusterfs,heketi
+objects:
+- kind: Service
+ apiVersion: v1
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-service
+ annotations:
+ description: Exposes Heketi service
+ spec:
+ ports:
+ - name: heketi
+ port: 8080
+ targetPort: 8080
+ selector:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+- kind: Route
+ apiVersion: v1
+ metadata:
+ name: ${HEKETI_ROUTE}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-route
+ spec:
+ to:
+ kind: Service
+ name: heketi-${CLUSTER_NAME}
+- kind: DeploymentConfig
+ apiVersion: v1
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-dc
+ annotations:
+ description: Defines how to deploy Heketi
+ spec:
+ replicas: 1
+ selector:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+ triggers:
+ - type: ConfigChange
+ strategy:
+ type: Recreate
+ template:
+ metadata:
+ name: heketi-${CLUSTER_NAME}
+ labels:
+ glusterfs: heketi-${CLUSTER_NAME}-pod
+ spec:
+ serviceAccountName: heketi-${CLUSTER_NAME}-service-account
+ containers:
+ - name: heketi
+ image: ${IMAGE_NAME}:${IMAGE_VERSION}
+ imagePullPolicy: IfNotPresent
+ env:
+ - name: HEKETI_USER_KEY
+ value: ${HEKETI_USER_KEY}
+ - name: HEKETI_ADMIN_KEY
+ value: ${HEKETI_ADMIN_KEY}
+ - name: HEKETI_EXECUTOR
+ value: ${HEKETI_EXECUTOR}
+ - name: HEKETI_FSTAB
+ value: /var/lib/heketi/fstab
+ - name: HEKETI_SNAPSHOT_LIMIT
+ value: '14'
+ - name: HEKETI_KUBE_GLUSTER_DAEMONSET
+ value: '1'
+ - name: HEKETI_KUBE_NAMESPACE
+ value: ${HEKETI_KUBE_NAMESPACE}
+ ports:
+ - containerPort: 8080
+ volumeMounts:
+ - name: db
+ mountPath: /var/lib/heketi
+ - name: config
+ mountPath: /etc/heketi
+ readinessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 3
+ httpGet:
+ path: /hello
+ port: 8080
+ livenessProbe:
+ timeoutSeconds: 3
+ initialDelaySeconds: 30
+ httpGet:
+ path: /hello
+ port: 8080
+ volumes:
+ - name: db
+ glusterfs:
+ endpoints: heketi-db-${CLUSTER_NAME}-endpoints
+ path: heketidbstorage
+ - name: config
+ secret:
+ secretName: heketi-${CLUSTER_NAME}-config-secret
+parameters:
+- name: HEKETI_USER_KEY
+ displayName: Heketi User Secret
+ description: Set secret for those creating volumes as type _user_
+- name: HEKETI_ADMIN_KEY
+ displayName: Heketi Administrator Secret
+ description: Set secret for administration of the Heketi service as user _admin_
+- name: HEKETI_EXECUTOR
+ displayName: heketi executor type
+ description: Set the executor type, kubernetes or ssh
+ value: kubernetes
+- name: HEKETI_KUBE_NAMESPACE
+ displayName: Namespace
+ description: Set the namespace where the GlusterFS pods reside
+ value: default
+- name: HEKETI_ROUTE
+ displayName: heketi route name
+ description: Set the hostname for the route URL
+ value: "heketi-glusterfs"
+- name: IMAGE_NAME
+ displayName: heketi container image name
+ required: True
+- name: IMAGE_VERSION
+ displayName: heketi container image version
+ required: True
+- name: CLUSTER_NAME
+ displayName: GlusterFS cluster name
+ description: A unique name to identify this heketi service, useful for running multiple heketi instances
+ value: glusterfs
diff --git a/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml b/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml
index 9ebb0d5ec..7b705c2d4 100644
--- a/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml
+++ b/roles/openshift_storage_glusterfs/files/v3.7/deploy-heketi-template.yml
@@ -85,8 +85,6 @@ objects:
volumeMounts:
- name: db
mountPath: /var/lib/heketi
- - name: topology
- mountPath: ${TOPOLOGY_PATH}
- name: config
mountPath: /etc/heketi
readinessProbe:
@@ -103,9 +101,6 @@ objects:
port: 8080
volumes:
- name: db
- - name: topology
- secret:
- secretName: heketi-${CLUSTER_NAME}-topology-secret
- name: config
secret:
secretName: heketi-${CLUSTER_NAME}-config-secret
@@ -138,6 +133,3 @@ parameters:
displayName: GlusterFS cluster name
description: A unique name to identify this heketi service, useful for running multiple heketi instances
value: glusterfs
-- name: TOPOLOGY_PATH
- displayName: heketi topology file location
- required: True
diff --git a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
index 074904bec..54a6dd7c3 100644
--- a/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
+++ b/roles/openshift_storage_glusterfs/tasks/heketi_deploy_part2.yml
@@ -1,6 +1,6 @@
---
- name: Create heketi DB volume
- command: "{{ glusterfs_heketi_client }} setup-openshift-heketi-storage --image {{ glusterfs_heketi_image}}:{{ glusterfs_heketi_version }} --listfile /tmp/heketi-storage.json"
+ command: "{{ glusterfs_heketi_client }} setup-openshift-heketi-storage --listfile /tmp/heketi-storage.json"
register: setup_storage
- name: Copy heketi-storage list
diff --git a/roles/openshift_storage_glusterfs/tasks/kernel_modules.yml b/roles/openshift_storage_glusterfs/tasks/kernel_modules.yml
new file mode 100644
index 000000000..030fa81c9
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/tasks/kernel_modules.yml
@@ -0,0 +1,12 @@
+---
+- name: Ensure device mapper modules loaded
+ template:
+ src: glusterfs.conf
+ dest: /etc/modules-load.d/glusterfs.conf
+ register: km
+
+- name: load kernel modules
+ systemd:
+ name: systemd-modules-load.service
+ state: restarted
+ when: km | changed
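
A rough companion check one might use when debugging, not part of the role itself (the module name tested comes from the template below; the task is standard Ansible):

```yaml
- name: Verify device mapper modules are loaded (illustrative)
  command: lsmod
  register: lsmod_out
  changed_when: false
  failed_when: "'dm_thin_pool' not in lsmod_out.stdout"
```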
diff --git a/roles/openshift_storage_glusterfs/templates/glusterfs.conf b/roles/openshift_storage_glusterfs/templates/glusterfs.conf
new file mode 100644
index 000000000..dd4d6e6f7
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/glusterfs.conf
@@ -0,0 +1,4 @@
+#{{ ansible_managed }}
+dm_thin_pool
+dm_snapshot
+dm_mirror
\ No newline at end of file
diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-endpoints.yml.j2
new file mode 100644
index 000000000..11c9195bb
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: glusterfs-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+ - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+ ports:
+ - port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-service.yml.j2
new file mode 100644
index 000000000..3f869d2b7
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-registry-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: glusterfs-{{ glusterfs_name }}-endpoints
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2
new file mode 100644
index 000000000..454e84aaf
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.5/glusterfs-storageclass.yml.j2
@@ -0,0 +1,13 @@
+---
+apiVersion: storage.k8s.io/v1beta1
+kind: StorageClass
+metadata:
+ name: glusterfs-{{ glusterfs_name }}
+provisioner: kubernetes.io/glusterfs
+parameters:
+ resturl: "http://{% if glusterfs_heketi_is_native %}{{ glusterfs_heketi_route }}{% else %}{{ glusterfs_heketi_url }}:{{ glusterfs_heketi_port }}{% endif %}"
+ restuser: "admin"
+{% if glusterfs_heketi_admin_key is defined %}
+ secretNamespace: "{{ glusterfs_namespace }}"
+ secretName: "heketi-{{ glusterfs_name }}-admin-secret"
+{%- endif -%}
diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/heketi-endpoints.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/heketi-endpoints.yml.j2
new file mode 100644
index 000000000..99cbdf748
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.5/heketi-endpoints.yml.j2
@@ -0,0 +1,12 @@
+---
+apiVersion: v1
+kind: Endpoints
+metadata:
+ name: heketi-db-{{ glusterfs_name }}-endpoints
+subsets:
+- addresses:
+{% for node in glusterfs_nodes %}
+ - ip: {{ hostvars[node].glusterfs_ip | default(hostvars[node].openshift.common.ip) }}
+{% endfor %}
+ ports:
+ - port: 1
diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/heketi-service.yml.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/heketi-service.yml.j2
new file mode 100644
index 000000000..dcb896441
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.5/heketi-service.yml.j2
@@ -0,0 +1,10 @@
+---
+apiVersion: v1
+kind: Service
+metadata:
+ name: heketi-db-{{ glusterfs_name }}-endpoints
+spec:
+ ports:
+ - port: 1
+status:
+ loadBalancer: {}
diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/heketi.json.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/heketi.json.j2
new file mode 100644
index 000000000..579b11bb7
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.5/heketi.json.j2
@@ -0,0 +1,36 @@
+{
+ "_port_comment": "Heketi Server Port Number",
+ "port" : "8080",
+
+ "_use_auth": "Enable JWT authorization. Please enable for deployment",
+ "use_auth" : false,
+
+ "_jwt" : "Private keys for access",
+ "jwt" : {
+ "_admin" : "Admin has access to all APIs",
+ "admin" : {
+ "key" : "My Secret"
+ },
+ "_user" : "User only has access to /volumes endpoint",
+ "user" : {
+ "key" : "My Secret"
+ }
+ },
+
+ "_glusterfs_comment": "GlusterFS Configuration",
+ "glusterfs" : {
+
+ "_executor_comment": "Execute plugin. Possible choices: mock, kubernetes, ssh",
+ "executor" : "{{ glusterfs_heketi_executor }}",
+
+ "_db_comment": "Database file name",
+ "db" : "/var/lib/heketi/heketi.db",
+
+ "sshexec" : {
+ "keyfile" : "/etc/heketi/private_key",
+ "port" : "{{ glusterfs_heketi_ssh_port }}",
+ "user" : "{{ glusterfs_heketi_ssh_user }}",
+ "sudo" : {{ glusterfs_heketi_ssh_sudo | lower }}
+ }
+ }
+}
diff --git a/roles/openshift_storage_glusterfs/templates/v1.5/topology.json.j2 b/roles/openshift_storage_glusterfs/templates/v1.5/topology.json.j2
new file mode 100644
index 000000000..d6c28f6dd
--- /dev/null
+++ b/roles/openshift_storage_glusterfs/templates/v1.5/topology.json.j2
@@ -0,0 +1,49 @@
+{
+ "clusters": [
+{%- set clusters = {} -%}
+{%- for node in glusterfs_nodes -%}
+ {%- set cluster = hostvars[node].glusterfs_cluster if 'glusterfs_cluster' in hostvars[node] else '1' -%}
+ {%- if cluster in clusters -%}
+ {%- set _dummy = clusters[cluster].append(node) -%}
+ {%- else -%}
+ {%- set _dummy = clusters.update({cluster: [ node, ]}) -%}
+ {%- endif -%}
+{%- endfor -%}
+{%- for cluster in clusters -%}
+ {
+ "nodes": [
+{%- for node in clusters[cluster] -%}
+ {
+ "node": {
+ "hostnames": {
+ "manage": [
+{%- if 'glusterfs_hostname' in hostvars[node] -%}
+ "{{ hostvars[node].glusterfs_hostname }}"
+{%- elif 'openshift' in hostvars[node] -%}
+ "{{ hostvars[node].openshift.node.nodename }}"
+{%- else -%}
+ "{{ node }}"
+{%- endif -%}
+ ],
+ "storage": [
+{%- if 'glusterfs_ip' in hostvars[node] -%}
+ "{{ hostvars[node].glusterfs_ip }}"
+{%- else -%}
+ "{{ hostvars[node].openshift.common.ip }}"
+{%- endif -%}
+ ]
+ },
+ "zone": {{ hostvars[node].glusterfs_zone | default(1) }}
+ },
+ "devices": [
+{%- for device in hostvars[node].glusterfs_devices -%}
+ "{{ device }}"{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+ }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+ }{% if not loop.last %},{% endif %}
+{%- endfor -%}
+ ]
+}
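
The topology template consumes per-node inventory facts; a hypothetical host_vars entry for a node in the `[glusterfs]` group might look like this (address, zone, and device paths are illustrative):

```yaml
glusterfs_ip: 192.168.10.11
glusterfs_zone: 1
glusterfs_cluster: '1'
glusterfs_devices:
- /dev/xvdc
- /dev/xvdd
```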
diff --git a/roles/openshift_storage_nfs/tasks/main.yml b/roles/openshift_storage_nfs/tasks/main.yml
index 3047fbaf9..c4e023c1e 100644
--- a/roles/openshift_storage_nfs/tasks/main.yml
+++ b/roles/openshift_storage_nfs/tasks/main.yml
@@ -35,6 +35,9 @@
- "{{ openshift.logging }}"
- "{{ openshift.loggingops }}"
- "{{ openshift.hosted.etcd }}"
+ - "{{ openshift.prometheus }}"
+ - "{{ openshift.prometheus.alertmanager }}"
+ - "{{ openshift.prometheus.alertbuffer }}"
- name: Configure exports
template:
diff --git a/roles/openshift_storage_nfs/templates/exports.j2 b/roles/openshift_storage_nfs/templates/exports.j2
index 0141e0d25..c2a741035 100644
--- a/roles/openshift_storage_nfs/templates/exports.j2
+++ b/roles/openshift_storage_nfs/templates/exports.j2
@@ -3,3 +3,6 @@
{{ openshift.logging.storage.nfs.directory }}/{{ openshift.logging.storage.volume.name }} {{ openshift.logging.storage.nfs.options }}
{{ openshift.loggingops.storage.nfs.directory }}/{{ openshift.loggingops.storage.volume.name }} {{ openshift.loggingops.storage.nfs.options }}
{{ openshift.hosted.etcd.storage.nfs.directory }}/{{ openshift.hosted.etcd.storage.volume.name }} {{ openshift.hosted.etcd.storage.nfs.options }}
+{{ openshift.prometheus.storage.nfs.directory }}/{{ openshift.prometheus.storage.volume.name }} {{ openshift.prometheus.storage.nfs.options }}
+{{ openshift.prometheus.alertmanager.storage.nfs.directory }}/{{ openshift.prometheus.alertmanager.storage.volume.name }} {{ openshift.prometheus.alertmanager.storage.nfs.options }}
+{{ openshift.prometheus.alertbuffer.storage.nfs.directory }}/{{ openshift.prometheus.alertbuffer.storage.volume.name }} {{ openshift.prometheus.alertbuffer.storage.nfs.options }}
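
With the role's usual facts in place, each added line renders to an NFS export entry of the form `<directory>/<volume> <options>`, for example (paths and options illustrative):

```
/exports/prometheus *(rw,root_squash)
/exports/prometheus-alertmanager *(rw,root_squash)
/exports/prometheus-alertbuffer *(rw,root_squash)
```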
diff --git a/roles/openshift_version/defaults/main.yml b/roles/openshift_version/defaults/main.yml
index 53d10f1f8..01a1a7472 100644
--- a/roles/openshift_version/defaults/main.yml
+++ b/roles/openshift_version/defaults/main.yml
@@ -1,3 +1,2 @@
---
openshift_protect_installed_version: True
-version_install_base_package: False
diff --git a/roles/openshift_version/tasks/main.yml b/roles/openshift_version/tasks/main.yml
index f4e9ff43a..1c8b9046c 100644
--- a/roles/openshift_version/tasks/main.yml
+++ b/roles/openshift_version/tasks/main.yml
@@ -5,16 +5,6 @@
is_containerized: "{{ openshift.common.is_containerized | default(False) | bool }}"
is_atomic: "{{ openshift.common.is_atomic | default(False) | bool }}"
-# This is only needed on masters and nodes; version_install_base_package
-# should be set by a play externally.
-- name: Install the base package for versioning
- package:
- name: "{{ openshift.common.service_type }}{{ openshift_pkg_version | default('') | oo_image_tag_to_rpm_version(include_dash=True) }}"
- state: present
- when:
- - not is_containerized | bool
- - version_install_base_package | bool
-
# Block attempts to install origin without specifying some kind of version information.
# This is because the latest tags for origin are usually alpha builds, which should not
# be used by default. Users must indicate what they want.
diff --git a/roles/template_service_broker/defaults/main.yml b/roles/template_service_broker/defaults/main.yml
index fb407c4a2..a92a138b0 100644
--- a/roles/template_service_broker/defaults/main.yml
+++ b/roles/template_service_broker/defaults/main.yml
@@ -2,3 +2,4 @@
# placeholder file?
template_service_broker_remove: False
template_service_broker_install: False
+openshift_template_service_broker_namespaces: ['openshift']
diff --git a/roles/template_service_broker/files/openshift-ansible-catalog-console.js b/roles/template_service_broker/files/openshift-ansible-catalog-console.js
index b3a3d3428..622afb6bd 100644
--- a/roles/template_service_broker/files/openshift-ansible-catalog-console.js
+++ b/roles/template_service_broker/files/openshift-ansible-catalog-console.js
@@ -1 +1 @@
-window.OPENSHIFT_CONSTANTS.ENABLE_TECH_PREVIEW_FEATURE.template_service_broker = true;
+window.OPENSHIFT_CONSTANTS.TEMPLATE_SERVICE_BROKER_ENABLED = true;
diff --git a/roles/template_service_broker/tasks/install.yml b/roles/template_service_broker/tasks/install.yml
index f5fd6487c..6a532a206 100644
--- a/roles/template_service_broker/tasks/install.yml
+++ b/roles/template_service_broker/tasks/install.yml
@@ -6,7 +6,7 @@
- "{{ openshift_deployment_type | default(deployment_type) }}.yml"
- "default_images.yml"
-- name: set ansible_service_broker facts
+- name: set template_service_broker facts
set_fact:
template_service_broker_prefix: "{{ template_service_broker_prefix | default(__template_service_broker_prefix) }}"
template_service_broker_version: "{{ template_service_broker_version | default(__template_service_broker_version) }}"
@@ -28,10 +28,24 @@
- "{{ __tsb_template_file }}"
- "{{ __tsb_rbac_file }}"
- "{{ __tsb_broker_file }}"
+ - "{{ __tsb_config_file }}"
+
+- yedit:
+ src: "{{ mktemp.stdout }}/{{ __tsb_config_file }}"
+ key: templateNamespaces
+ value: "{{ openshift_template_service_broker_namespaces }}"
+ value_type: list
+
+- slurp:
+ src: "{{ mktemp.stdout }}/{{ __tsb_config_file }}"
+ register: config

- name: Apply template file
shell: >
- oc process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" --param API_SERVER_CONFIG="{{ lookup('file', __tsb_files_location ~ '/' ~ __tsb_config_file) }}" --param IMAGE="{{ template_service_broker_prefix }}{{ template_service_broker_image_name }}:{{ template_service_broker_version }}" | kubectl apply -f -
+ oc process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}"
+ --param API_SERVER_CONFIG="{{ config['content'] | b64decode }}"
+ --param IMAGE="{{ template_service_broker_prefix }}{{ template_service_broker_image_name }}:{{ template_service_broker_version }}"
+ | kubectl apply -f -

# reconcile with rbac
- name: Reconcile with RBAC file
@@ -62,7 +76,7 @@
when: openshift_master_config_dir is undefined

- slurp:
- src: "{{ openshift_master_config_dir }}/ca.crt"
+ src: "{{ openshift_master_config_dir }}/service-signer.crt"
register: __ca_bundle

# Register with broker
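
The reworked install flow stops reading the stock config straight from the role's files directory: the config template is copied into the temp dir alongside the others, `yedit` patches `templateNamespaces` with the new `openshift_template_service_broker_namespaces` default, and `slurp` reads it back. Since slurp returns base64-encoded content, the `oc process` call decodes it with `b64decode`. A Python sketch of that round trip (PyYAML standing in for yedit; the config text is a hypothetical stand-in):

```python
import base64
import yaml  # PyYAML, standing in for the yedit module

# Hypothetical stand-in for the slurped TSB apiserver config template.
config_text = "templateNamespaces: []\n"

# yedit step: set templateNamespaces to the configured list.
config = yaml.safe_load(config_text)
config["templateNamespaces"] = ["openshift"]  # openshift_template_service_broker_namespaces
patched = yaml.safe_dump(config)

# slurp step: Ansible hands the file back base64-encoded ...
slurped = base64.b64encode(patched.encode())

# ... and the template parameter is the decoded text, as in --param API_SERVER_CONFIG.
api_server_config = base64.b64decode(slurped).decode()
print(api_server_config)
```

The final hunk of the same file also registers the broker against the service-signer certificate rather than the master `ca.crt`.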
diff --git a/roles/template_service_broker/tasks/remove.yml b/roles/template_service_broker/tasks/remove.yml
index f3afe65ed..28836f97f 100644
--- a/roles/template_service_broker/tasks/remove.yml
+++ b/roles/template_service_broker/tasks/remove.yml
@@ -13,11 +13,11 @@

- name: Delete TSB broker
shell: >
- oc process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" | oc delete -f -
+ oc process -f "{{ mktemp.stdout }}/{{ __tsb_broker_file }}" | oc delete --ignore-not-found -f -
- name: Delete TSB objects
shell: >
- oc process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" | kubectl delete -f -
+ oc process -f "{{ mktemp.stdout }}/{{ __tsb_template_file }}" | oc delete --ignore-not-found -f -
- name: empty out tech preview extension file for service console UI
copy:
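
Switching both deletes to `oc delete --ignore-not-found` (and using `oc` consistently instead of `kubectl`) makes the uninstall idempotent: a rerun exits cleanly when the objects are already gone. The equivalent pipeline from Python, with an illustrative temp path in place of `{{ mktemp.stdout }}/{{ __tsb_template_file }}` (running it for real requires an `oc` client and an active session):

```python
import subprocess

# Pipe `oc process` into `oc delete --ignore-not-found`, as the task does.
# With the flag, a second run still exits 0 even though nothing is left to delete.
cmd = (
    'oc process -f /tmp/tsb-XXXX/apiserver-template.yaml '
    '| oc delete --ignore-not-found -f -'
)
result = subprocess.run(cmd, shell=True, capture_output=True, text=True)
print(result.returncode, result.stdout.strip(), result.stderr.strip())
```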
diff --git a/roles/tuned/defaults/main.yml b/roles/tuned/defaults/main.yml
new file mode 100644
index 000000000..418a4b521
--- /dev/null
+++ b/roles/tuned/defaults/main.yml
@@ -0,0 +1,3 @@
+---
+tuned_etc_directory: '/etc/tuned'
+tuned_templates_source: '../templates'
diff --git a/roles/tuned/meta/main.yml b/roles/tuned/meta/main.yml
new file mode 100644
index 000000000..833d94c13
--- /dev/null
+++ b/roles/tuned/meta/main.yml
@@ -0,0 +1,13 @@
+---
+galaxy_info:
+ author: Jiri Mencak
+ description: Restart the tuned daemon if present and make it use the recommended profile
+ company: Red Hat, Inc.
+ license: Apache License, Version 2.0
+ min_ansible_version: 2.3
+ platforms:
+ - name: EL
+ versions:
+ - 7
+ categories:
+ - cloud
diff --git a/roles/openshift_node/tasks/tuned.yml b/roles/tuned/tasks/main.yml
index 425bf6a26..e95d274d5 100644
--- a/roles/openshift_node/tasks/tuned.yml
+++ b/roles/tuned/tasks/main.yml
@@ -12,8 +12,6 @@
- name: Set tuned OpenShift variables
set_fact:
openshift_tuned_guest_profile: "{{ 'atomic-guest' if openshift.common.is_atomic else 'virtual-guest' }}"
- tuned_etc_directory: '/etc/tuned'
- tuned_templates_source: '../templates/tuned'

- name: Ensure directory structure exists
file:
diff --git a/roles/openshift_node/templates/tuned/openshift-control-plane/tuned.conf b/roles/tuned/templates/openshift-control-plane/tuned.conf
index f22f21065..f22f21065 100644
--- a/roles/openshift_node/templates/tuned/openshift-control-plane/tuned.conf
+++ b/roles/tuned/templates/openshift-control-plane/tuned.conf
diff --git a/roles/openshift_node/templates/tuned/openshift-node/tuned.conf b/roles/tuned/templates/openshift-node/tuned.conf
index 78c7d19c9..78c7d19c9 100644
--- a/roles/openshift_node/templates/tuned/openshift-node/tuned.conf
+++ b/roles/tuned/templates/openshift-node/tuned.conf
diff --git a/roles/openshift_node/templates/tuned/openshift/tuned.conf b/roles/tuned/templates/openshift/tuned.conf
index 68ac5dadb..68ac5dadb 100644
--- a/roles/openshift_node/templates/tuned/openshift/tuned.conf
+++ b/roles/tuned/templates/openshift/tuned.conf
diff --git a/roles/openshift_node/templates/tuned/recommend.conf b/roles/tuned/templates/recommend.conf
index 5fa765798..086e5673d 100644
--- a/roles/openshift_node/templates/tuned/recommend.conf
+++ b/roles/tuned/templates/recommend.conf
@@ -1,8 +1,11 @@
-[openshift-node]
-/etc/origin/node/node-config.yaml=.*region=primary
-
[openshift-control-plane,master]
/etc/origin/master/master-config.yaml=.*

[openshift-control-plane,node]
/etc/origin/node/node-config.yaml=.*region=infra
+
+[openshift-control-plane,lb]
+/etc/haproxy/haproxy.cfg=.*
+
+[openshift-node]
+/etc/origin/node/node-config.yaml=.*
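
The reshuffle matters because tuned's `recommend` logic is first-match: previously the `[openshift-node]` rule sat on top and only claimed nodes whose config matched `region=primary`; now the control-plane rules (including the new haproxy-based `lb` match) are tried first, and the unrestricted `[openshift-node]` catch-all picks up every remaining node. A simplified first-match sketch in Python, handling only the `path=regex` rule form used in this file:

```python
import configparser
import re

RECOMMEND = """
[openshift-control-plane,master]
/etc/origin/master/master-config.yaml=.*

[openshift-control-plane,node]
/etc/origin/node/node-config.yaml=.*region=infra

[openshift-control-plane,lb]
/etc/haproxy/haproxy.cfg=.*

[openshift-node]
/etc/origin/node/node-config.yaml=.*
"""

def recommend_profile(files):
    """Return the first profile whose file-content regexes all match.

    `files` maps path -> file content, standing in for the real filesystem.
    Simplification: only path=regex rules are handled, and the profile name
    is taken as the part of the section header before the comma.
    """
    parser = configparser.ConfigParser(delimiters=("=",))
    parser.optionxform = str  # keep paths case-sensitive
    parser.read_string(RECOMMEND)
    for section in parser.sections():
        if all(path in files and re.search(regex, files[path])
               for path, regex in parser[section].items()):
            return section.split(",")[0]
    return None

# An infra node matches the control-plane rule; a primary node no longer
# needs its own region match and falls through to the catch-all.
print(recommend_profile({"/etc/origin/node/node-config.yaml": "region=infra"}))
print(recommend_profile({"/etc/origin/node/node-config.yaml": "region=primary"}))
```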
diff --git a/utils/src/ooinstall/oo_config.py b/utils/src/ooinstall/oo_config.py
index c3501c018..9ecd63a80 100644
--- a/utils/src/ooinstall/oo_config.py
+++ b/utils/src/ooinstall/oo_config.py
@@ -220,6 +220,7 @@ class OOConfig(object):
persisted_value = loaded_config.get(setting)
if persisted_value is not None:
self.settings[setting] = str(persisted_value)
+ installer_log.debug("config: set (%s) to value (%s)", setting, persisted_value)

# We've loaded any persisted configs, let's verify any
# paths which are required for a correct and complete
@@ -344,8 +345,9 @@ class OOConfig(object):
if 'ansible_ssh_user' not in self.settings:
self.settings['ansible_ssh_user'] = ''

- self.settings['ansible_inventory_path'] = \
- '{}/hosts'.format(os.path.dirname(self.config_path))
+ if 'ansible_inventory_path' not in self.settings:
+ self.settings['ansible_inventory_path'] = \
+ '{}/hosts'.format(os.path.dirname(self.config_path))

# clean up any empty sets
empty_keys = []
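
The first hunk adds a debug trace for every persisted setting; the second turns an unconditional assignment into a default, so an `ansible_inventory_path` loaded from the persisted config is no longer clobbered by the path derived from the config file's directory. A minimal sketch of the fixed behavior (paths illustrative):

```python
import os

def apply_inventory_default(settings, config_path):
    """Post-fix behavior: derive ansible_inventory_path only when unset."""
    if 'ansible_inventory_path' not in settings:
        settings['ansible_inventory_path'] = \
            '{}/hosts'.format(os.path.dirname(config_path))
    return settings

# A persisted value now survives a reload ...
print(apply_inventory_default({'ansible_inventory_path': '/custom/hosts'},
                              '/root/.config/openshift/installer.cfg.yml'))
# ... while a fresh config still gets the derived default.
print(apply_inventory_default({}, '/root/.config/openshift/installer.cfg.yml'))
```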