From f3741a05097f1848d2b3e9a01f03e76a33487e01 Mon Sep 17 00:00:00 2001 From: Tim Bielawa Date: Mon, 9 Oct 2017 16:14:38 -0400 Subject: Management Cleanup and Provider Integration * Add container provider integration * General cleanup * Poll until service fully starts * Add notes on multiple-provider additions --- roles/openshift_management/README.md | 167 +++++++++++++++++---- roles/openshift_management/defaults/main.yml | 14 ++ .../files/examples/container_providers.yml | 22 +++ .../tasks/add_container_provider.yml | 65 ++++++++ .../tasks/add_many_container_providers.yml | 27 ++++ roles/openshift_management/tasks/main.yml | 25 ++- .../tasks/storage/create_nfs_pvs.yml | 8 +- roles/openshift_management/tasks/template.yml | 26 ++-- roles/openshift_nfs/tasks/create_export.yml | 2 +- 9 files changed, 304 insertions(+), 52 deletions(-) create mode 100644 roles/openshift_management/files/examples/container_providers.yml create mode 100644 roles/openshift_management/tasks/add_container_provider.yml create mode 100644 roles/openshift_management/tasks/add_many_container_providers.yml (limited to 'roles') diff --git a/roles/openshift_management/README.md b/roles/openshift_management/README.md index 3a71d9211..fb961e38e 100644 --- a/roles/openshift_management/README.md +++ b/roles/openshift_management/README.md @@ -38,6 +38,10 @@ deployment type (`openshift_deployment_type`): * [Cloud Provider](#cloud-provider) * [Preconfigured (Expert Configuration Only)](#preconfigured-expert-configuration-only) * [Customization](#customization) + * [Container Provider](#container-provider) + * [Manually](#manually) + * [Automatically](#automatically) + * [Multiple Providers](#multiple-providers) * [Uninstall](#uninstall) * [Additional Information](#additional-information) @@ -80,30 +84,10 @@ to there being no databases that require pods. *Be extra careful* if you are overriding template parameters. Including parameters not defined in a template **will -cause errors**. - -**Container Provider Integration** - If you want add your container -platform (OCP/Origin) as a *Container Provider* in CFME/MIQ then you -must ensure that the infrastructure management hooks are installed. - -* During your OCP/Origin install, ensure that you have the - `openshift_use_manageiq` parameter set to `true` in your inventory - at install time. This will create a `management-infra` project and a - service account user. -* After CFME/MIQ is installed, obtain the `management-admin` service - account token and copy it somewhere safe. 
-
-```bash
-$ oc serviceaccounts get-token -n management-infra management-admin
-eyJhuGdiOiJSUzI1NiIsInR5dCI6IkpXVCJ9.eyJpd9MiOiJrbWJldm5lbGVzL9NldnZpY2VhY2NvbW50Iiwiy9ViZXJuZXRldy5puy9zZXJ2yWNlYWNju9VubC9uYW1ld9BhY2UiOiJtYW5hZ2VtZW50LWluZnJhIiwiy9ViZXJuZXRldy5puy9zZXJ2yWNlYWNju9VubC9zZWNyZXQuumFtZSI6Im1humFnZW1lunQtYWRtyW4tbG9rZW4tdDBnOTAiLCJrbWJldm5lbGVzLmlvL9NldnZpY2VhY2NvbW50L9NldnZpY2UtYWNju9VubC5uYW1lIjoiuWFuYWbluWVubC1hZG1puiIsImt1YmVyumV0ZXMuyW8vd2VybmljZWFjY291unQvd2VybmljZS1hY2NvbW50LnVpZCI6IjRiZDM2MWQ1LWE1NDAtMTFlNy04YzI5LTUyNTQwMDliMmNkZCIsInN1YiI6InN5d9RluTpzZXJ2yWNlYWNju9VubDptYW5hZ2VtZW50LWluZnJhOm1humFnZW1lunQtYWRtyW4ifQ.B6sZLGD9O4vBu9MHwiG-C_4iEwjBXb7Af8BPw-LNlujDmHhOnQ-Oo4QxQKyj9edynfmDy2yutUyJ2Mm9HfDGWg4C9xhWImHoq6Nl7T5_9djkeGKkK7Ejvg4fA-IkrzEsZeQuluBvXnE6wvP0LCjUo_dx4pPyZJyp46teV9NqKQeDzeysjlMCyqp6AK6-Lj8ILG8YA6d_97HlzL_EgFBLAu0lBSn-uC_9J0gLysqBtK6TI0nExfhv9Bm1_5bdHEbKHPW7xIlYlI9AgmyTyhsQ6SoQWtL2khBjkG9TlPBq9wYJj9bzqgVZlqEfICZxgtXO7sYyuoje4y8lo0YQ0kZmig
-```
-
-* In the CFME/MIQ web interface, navigate to `Compute` →
-  `Containers` → `Providers` and select `⚙ Configuration` → `⊕
-  Add a new Containers Provider`
-
-*See the [upstream documentation](http://manageiq.org/docs/reference/latest/doc-Managing_Providers/miq/index.html#containers-providers) for additional information.*
-
+cause errors**. If you do receive an error during the `Ensure the CFME
+App is created` task, we recommend running the
+[uninstall scripts](#uninstall) first before running the installer
+again.

 # Requirements

@@ -140,11 +124,13 @@ used in your Ansible inventory to control the behavior of this
 installer.

-| Variable | Required | Default | Description |
-|------------------------------------------------|:--------:|:------------------------------:|-------------------------------------|
-| `openshift_management_project` | **No** | `openshift-management` | Namespace for the installation. |
+| Variable | Required | Default | Description |
+|------------------------------------------------------|:--------:|:------------------------------:|-------------------------------------|
+| `openshift_management_project` | **No** | `openshift-management` | Namespace for the installation. |
 | `openshift_management_project_description` | **No** | *CloudForms Management Engine* | Namespace/project description. |
-| `openshift_management_install_management` | **No** | `false` | Boolean, set to `true` to install the application |
+| `openshift_management_install_management` | **No** | `false` | Boolean, set to `true` to install the application |
+| `openshift_management_username` | **No** | `admin` | Default management username. Changing this value **does not change the username**. Only change this value if you have changed the name already and are running integration scripts (such as the [add container provider](#container-provider) script) |
+| `openshift_management_password` | **No** | `smartvm` | Default management password. Changing this value **does not change the password**. Only change this value if you have changed the password already and are running integration scripts (such as the [add container provider](#container-provider) script) |
 | **PRODUCT CHOICE** | | | | |
 | `openshift_management_app_template` | **No** | `miq-template` | The project flavor to install.
Choices: |
 | **STORAGE CLASSES** | | | | |
@@ -268,6 +254,9 @@ openshift_management_app_template=cfme-template-ext-db
 openshift_management_template_parameters={'DATABASE_USER': 'root', 'DATABASE_PASSWORD': 'r1ck&M0r7y', 'DATABASE_IP': '10.10.10.10', 'DATABASE_PORT': '5432', 'DATABASE_NAME': 'cfme'}
 ```

+**NOTE:** Ensure you are running PostgreSQL 9.5 or you may not be
+able to deploy the app successfully.
+
 # Limitations

 This release is the first OpenShift CFME release in the OCP 3.7
@@ -318,6 +307,9 @@ inventory. The following keys are required:

 * `DATABASE_PORT` - *note: Most PostgreSQL servers run on port `5432`*
 * `DATABASE_NAME`

+**NOTE:** Ensure you are running PostgreSQL 9.5 or you may not be
+able to deploy the app successfully.
+
 Your inventory would contain a line similar to this:

 ```ini
@@ -453,6 +445,120 @@ hash. This applies to **CloudForms** installations as
 well:
 [cfme-template.yaml](files/templates/cloudforms/cfme-template.yaml),
 [cfme-template-ext-db.yaml](files/templates/cloudforms/cfme-template-ext-db.yaml).

+# Container Provider
+
+There are two methods for enabling container provider integration. You
+can manually add OCP/Origin as a container provider, or you can use
+the playbooks included with this role.
+
+## Manually
+
+See the online documentation for steps to manually add your cluster as
+a container provider:
+
+* [Container Providers](http://manageiq.org/docs/reference/latest/doc-Managing_Providers/miq/#containers-providers)
+
+## Automatically
+
+Automated container provider integration can be accomplished using the
+playbook included with this role.
+
+This playbook will:
+
+1. Gather the necessary authentication secrets
+1. Find the public routes to the Management app and the cluster API
+1. Make a REST call to add this cluster as a container provider
+
+
+```
+$ ansible-playbook -v -i <your_inventory_file> playbooks/byo/openshift-management/add_container_provider.yml
+```
+
+## Multiple Providers
+
+As well as providing playbooks to integrate your *current* container
+platform into the management service, this role includes a **tech
+preview** script which allows you to add multiple container platforms
+as container providers in any arbitrary MIQ/CFME server.
+
+Using the multiple-provider script requires manual configuration and
+the setting of an additional inventory parameter.
+
+
+1. Copy the
+   [container_providers.yml](files/examples/container_providers.yml)
+   example somewhere
+1. Add the `openshift_management_many_container_providers_config`
+   parameter to your inventory and set the value to the full path of
+   the file you copied in `Step 1`
+1. Update the `hostname`, `user`, and `password` parameters in the
+   `management_server` key in the `container_providers.yml` file copy
+1. Fill in an entry under the `container_providers` key for *each* OCP
+   or Origin cluster you want to add as container providers
+
+**Parameters Which MUST Be Configured:**
+
+* `auth_key` - This is the token of a service account which has admin capabilities on the cluster. A quick way to sanity-check a token is sketched after this list.
+* `hostname` - This is the hostname that points to the cluster API. Each container provider must have a unique hostname.
+* `name` - This is the name of the cluster as displayed in the management server container providers overview. This must be unique.
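+
+Before using a token in the config file, you may want to confirm that
+the `auth_key` and `hostname` pair actually authenticates against the
+cluster API. Below is a minimal, hedged sketch using Ansible's `uri`
+module; the hostname, port, and `PROVIDER_TOKEN` environment variable
+are placeholders, not values defined by this role:
+
+```yaml
+# Hedged sketch: verify a provider's hostname and token before adding it
+- hosts: localhost
+  gather_facts: false
+  tasks:
+    - name: Confirm the bearer token authenticates against the cluster API
+      uri:
+        url: "https://ocp-prod.example.com:8443/api/v1/namespaces"
+        method: GET
+        headers:
+          Authorization: "Bearer {{ lookup('env', 'PROVIDER_TOKEN') }}"
+        # Mirror the verify_ssl choice you plan to use in container_providers.yml
+        validate_certs: no
+```
+
+A `200` response means the pair is usable in `container_providers.yml`;
+a `401` failure means the token is invalid or expired.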
+ +*Note*: You can obtain the `auth_key` bearer token from your clusters + with this command: `oc serviceaccounts get-token -n management-infra + management-admin` + +**Parameters Which MAY Be Configured:** + +* `port` - Update this key if your OCP/Origin cluster runs the API on a port other than `8443` +* `endpoint` - You may enable SSL verification (`verify_ssl`) or change the validation setting to `ssl-with-validation`. Support for custom trusted CA certificates is not available at this time. + +Let's see an example describing the following scenario: + +* You copied `files/examples/container_providers.yml` to `/tmp/cp.yml` +* You're adding two OCP clusters +* Your management server runs on `mgmt.example.com` + +You would customize `/tmp/cp.yml` as such: + +```yaml +--- +container_providers: + - connection_configurations: + - authentication: {auth_key: "management-token-for-this-cluster", authtype: bearer, type: AuthToken} + endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0} + hostname: "ocp-prod.example.com" + name: OCP Production + port: 8443 + type: "ManageIQ::Providers::Openshift::ContainerManager" + - connection_configurations: + - authentication: {auth_key: "management-token-for-this-cluster", authtype: bearer, type: AuthToken} + endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0} + hostname: "ocp-test.example.com" + name: OCP Testing + port: 8443 + type: "ManageIQ::Providers::Openshift::ContainerManager" +management_server: + hostname: "mgmt.example.com" + user: admin + password: b3tt3r_p4SSw0rd +``` + +Then you would add to your inventory file: + +```ini +[OSEv3:vars] +# ... +openshift_management_many_container_providers_config=/tmp/cp.yml +``` + +Finally, run the many-container-providers integration script: + +``` +$ ansible-playbook -v -i roles/openshift_management/tasks/add_many_container_providers.yml +``` + +Afterwards you will find two new container providers in your +management service. Navigate to `Compute` → `Containers` → `Providers` +to see an overview. # Uninstall @@ -461,6 +567,11 @@ installation: * `playbooks/byo/openshift-management/uninstall.yml` +NFS export definitions and data stored on NFS exports are not +automatically removed. You are urged to manually erase any data from +old application or database deployments before attempting to +initialize a new deployment. + # Additional Information The upstream project, diff --git a/roles/openshift_management/defaults/main.yml b/roles/openshift_management/defaults/main.yml index ebb56313f..8ba65b386 100644 --- a/roles/openshift_management/defaults/main.yml +++ b/roles/openshift_management/defaults/main.yml @@ -76,6 +76,20 @@ openshift_management_storage_nfs_base_dir: /exports # setting this variable. Useful for testing specific task files. openshift_management_storage_nfs_local_hostname: false +###################################################################### +# DEFAULT ACCOUNT INFORMATION +###################################################################### +# These are the default values for the username and password of the +# management app. Changing these values in your inventory will not +# change your username or password. You should only need to change +# these values in your inventory if you already changed the actual +# name and password AND are trying to use integration scripts. 
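+#
+# A hedged illustration of such an inventory override (both values
+# below are placeholders, not real credentials):
+#
+#   [OSEv3:vars]
+#   openshift_management_username: my_mgmt_admin
+#   openshift_management_password: my_mgmt_s3cret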
+# +# For example, adding this cluster as a container provider, +# playbooks/byo/openshift-management/add_container_provider.yml +openshift_management_username: admin +openshift_management_password: smartvm + ###################################################################### # SCAFFOLDING - These are parameters we pre-seed that a user may or # may not set later diff --git a/roles/openshift_management/files/examples/container_providers.yml b/roles/openshift_management/files/examples/container_providers.yml new file mode 100644 index 000000000..661f62e4d --- /dev/null +++ b/roles/openshift_management/files/examples/container_providers.yml @@ -0,0 +1,22 @@ +--- +container_providers: + - connection_configurations: + - authentication: {auth_key: "management-admin-token-here", authtype: bearer, type: AuthToken} + endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0} + hostname: "OCP/Origin cluster hostname (providing API access)" + name: openshift-management + port: 8443 + type: "ManageIQ::Providers::Openshift::ContainerManager" +# Copy and update for as many OCP or Origin providers as you want to +# add to your management service + # - connection_configurations: + # - authentication: {auth_key: "management-admin-token-here", authtype: bearer, type: AuthToken} + # endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0} + # hostname: "OCP/Origin cluster hostname (providing API access)" + # name: openshift-management + # port: 8443 + # type: "ManageIQ::Providers::Openshift::ContainerManager" +management_server: + hostname: "Management server hostname (providing API access)" + user: admin + password: smartvm diff --git a/roles/openshift_management/tasks/add_container_provider.yml b/roles/openshift_management/tasks/add_container_provider.yml new file mode 100644 index 000000000..383e6edb5 --- /dev/null +++ b/roles/openshift_management/tasks/add_container_provider.yml @@ -0,0 +1,65 @@ +--- +- name: Ensure lib_openshift modules are available + include_role: + role: lib_openshift + +- name: Ensure OpenShift facts module is available + include_role: + role: openshift_facts + +- name: Ensure OpenShift facts are loaded + openshift_facts: + +- name: Ensure the management SA Secrets are read + oc_serviceaccount_secret: + state: list + service_account: management-admin + namespace: management-infra + register: sa + +- name: Ensure the management SA bearer token is identified + set_fact: + management_token: "{{ sa.results | oo_filter_sa_secrets }}" + +- name: Ensure the SA bearer token value is read + oc_secret: + state: list + name: "{{ management_token }}" + namespace: management-infra + decode: true + no_log: True + register: sa_secret + +- name: Ensure the SA bearer token value is saved + set_fact: + management_bearer_token: "{{ sa_secret.results.decoded.token }}" + +- name: Ensure we have the public route to the management service + oc_route: + state: list + name: httpd + namespace: openshift-management + register: route + +- name: Ensure the management service route is saved + set_fact: + management_route: "{{ route.results.0.spec.host }}" + +- name: Ensure this cluster is a container provider + uri: + url: "https://{{ management_route }}/api/providers" + body_format: json + method: POST + user: "{{ openshift_management_username }}" + password: "{{ openshift_management_password }}" + validate_certs: no + # Docs on formatting the BODY of the POST request: + # 
http://manageiq.org/docs/reference/latest/api/reference/providers.html#specifying-connection-configurations + body: + connection_configurations: + - authentication: {auth_key: "{{ management_bearer_token }}", authtype: bearer, type: AuthToken} + endpoint: {role: default, security_protocol: ssl-without-validation, verify_ssl: 0} + hostname: "{{ openshift.master.cluster_public_hostname }}" + name: "{{ openshift_management_project }}" + port: "{{ openshift.master.api_port }}" + type: "ManageIQ::Providers::Openshift::ContainerManager" diff --git a/roles/openshift_management/tasks/add_many_container_providers.yml b/roles/openshift_management/tasks/add_many_container_providers.yml new file mode 100644 index 000000000..f92c81277 --- /dev/null +++ b/roles/openshift_management/tasks/add_many_container_providers.yml @@ -0,0 +1,27 @@ +--- +- hosts: "{{ groups['masters'][0] }}" + tasks: + - name: Include providers/management configuration + include_vars: + file: "{{ openshift_management_many_container_providers_config }}" + + - name: Ensure this cluster is a container provider + uri: + url: "https://{{ management_server['hostname'] }}/api/providers" + body_format: json + method: POST + user: "{{ management_server['user'] }}" + password: "{{ management_server['password'] }}" + validate_certs: no + # Docs on formatting the BODY of the POST request: + # http://manageiq.org/docs/reference/latest/api/reference/providers.html#specifying-connection-configurations + body: "{{ item }}" + failed_when: false + with_items: "{{ container_providers }}" + register: results + + - name: Ensure failed additions are reported for each container provider + debug: + msg: | + FLOOP {{ item.item.hostname }} + with_items: "{{ results.results }}" diff --git a/roles/openshift_management/tasks/main.yml b/roles/openshift_management/tasks/main.yml index 86c4d0010..88290c44d 100644 --- a/roles/openshift_management/tasks/main.yml +++ b/roles/openshift_management/tasks/main.yml @@ -2,23 +2,29 @@ ######################################################################) # Users, projects, and privileges -- name: Run pre-install CFME validation checks +- name: Run pre-install Management validation checks include: validate.yml -- name: "Ensure the CFME '{{ openshift_management_project }}' namespace exists" +# This creates a service account allowing Container Provider +# integration (managing OCP/Origin via MIQ/Management) +- name: Enable Container Provider Integration + include_role: + role: openshift_manageiq + +- name: "Ensure the Management '{{ openshift_management_project }}' namespace exists" oc_project: state: present name: "{{ openshift_management_project }}" display_name: "{{ openshift_management_project_description }}" -- name: Create and Authorize CFME Accounts +- name: Create and Authorize Management Accounts include: accounts.yml ###################################################################### # STORAGE - Initialize basic storage class #--------------------------------------------------------------------- # * nfs - set up NFS shares on the first master for a proof of concept -- name: Create required NFS exports for CFME app storage +- name: Create required NFS exports for Management app storage include: storage/nfs.yml when: openshift_management_storage_class == 'nfs' @@ -45,7 +51,7 @@ ###################################################################### # APPLICATION TEMPLATE -- name: Install the CFME app and PV templates +- name: Install the Management app and PV templates include: template.yml 
###################################################################### @@ -71,9 +77,16 @@ when: - openshift_management_app_template in ['miq-template', 'cfme-template'] -- name: Ensure the CFME App is created +- name: Ensure the Management App is created oc_process: namespace: "{{ openshift_management_project }}" template_name: "{{ openshift_management_template_name }}" create: True params: "{{ openshift_management_template_parameters }}" + +- name: Wait for the app to come up. May take several minutes, 30s check intervals, 10m max + command: "oc logs {{ openshift_management_flavor }}-0 -n {{ openshift_management_project }}" + register: app_seeding_logs + until: app_seeding_logs.stdout.find('Server starting complete') != -1 + delay: 30 + retries: 20 diff --git a/roles/openshift_management/tasks/storage/create_nfs_pvs.yml b/roles/openshift_management/tasks/storage/create_nfs_pvs.yml index 31c845725..d1b9a8d5c 100644 --- a/roles/openshift_management/tasks/storage/create_nfs_pvs.yml +++ b/roles/openshift_management/tasks/storage/create_nfs_pvs.yml @@ -26,7 +26,7 @@ when: - openshift_management_template_parameters.DATABASE_VOLUME_CAPACITY is not defined -- name: Check if the CFME App PV has been created +- name: Check if the Management App PV has been created oc_obj: namespace: "{{ openshift_management_project }}" state: list @@ -34,7 +34,7 @@ name: "{{ openshift_management_flavor_short }}-app" register: miq_app_pv_check -- name: Check if the CFME DB PV has been created +- name: Check if the Management DB PV has been created oc_obj: namespace: "{{ openshift_management_project }}" state: list @@ -44,7 +44,7 @@ when: - openshift_management_app_template in ['miq-template', 'cfme-template'] -- name: Ensure the CFME App PV is created +- name: Ensure the Management App PV is created oc_process: namespace: "{{ openshift_management_project }}" template_name: "{{ openshift_management_flavor }}-app-pv" @@ -55,7 +55,7 @@ NFS_HOST: "{{ openshift_management_nfs_server }}" when: miq_app_pv_check.results.results == [{}] -- name: Ensure the CFME DB PV is created +- name: Ensure the Management DB PV is created oc_process: namespace: "{{ openshift_management_project }}" template_name: "{{ openshift_management_flavor }}-db-pv" diff --git a/roles/openshift_management/tasks/template.yml b/roles/openshift_management/tasks/template.yml index 299158ac4..9f97cdcb9 100644 --- a/roles/openshift_management/tasks/template.yml +++ b/roles/openshift_management/tasks/template.yml @@ -15,7 +15,7 @@ # STANDARD PODIFIED DATABASE TEMPLATE - when: openshift_management_app_template in ['miq-template', 'cfme-template'] block: - - name: Check if the CFME Server template has been created already + - name: Check if the Management Server template has been created already oc_obj: namespace: "{{ openshift_management_project }}" state: list @@ -25,12 +25,12 @@ - when: miq_server_check.results.results == [{}] block: - - name: Copy over CFME Server template + - name: Copy over Management Server template copy: src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-template.yaml" dest: "{{ template_dir }}/" - - name: Ensure CFME Server Template is created + - name: Ensure Management Server Template is created oc_obj: namespace: "{{ openshift_management_project }}" name: "{{ openshift_management_flavor }}" @@ -41,9 +41,9 @@ ###################################################################### # EXTERNAL DATABASE TEMPLATE -- when: openshift_management_app_template in ['miq-template-ext-db', 'cfme-template'] 
+- when: openshift_management_app_template in ['miq-template-ext-db', 'cfme-template-ext-db'] block: - - name: Check if the CFME Ext-DB Server template has been created already + - name: Check if the Management Ext-DB Server template has been created already oc_obj: namespace: "{{ openshift_management_project }}" state: list @@ -53,12 +53,12 @@ - when: miq_ext_db_server_check.results.results == [{}] block: - - name: Copy over CFME Ext-DB Server template + - name: Copy over Management Ext-DB Server template copy: src: "templates/{{ openshift_management_flavor }}/{{openshift_management_flavor_short}}-template-ext-db.yaml" dest: "{{ template_dir }}/" - - name: Ensure CFME Ext-DB Server Template is created + - name: Ensure Management Ext-DB Server Template is created oc_obj: namespace: "{{ openshift_management_project }}" name: "{{ openshift_management_flavor }}-ext-db" @@ -74,7 +74,7 @@ # Begin conditional PV template creations # Required for the application server -- name: Check if the CFME App PV template has been created already +- name: Check if the Management App PV template has been created already oc_obj: namespace: "{{ openshift_management_project }}" state: list @@ -84,12 +84,12 @@ - when: miq_app_pv_check.results.results == [{}] block: - - name: Copy over CFME App PV template + - name: Copy over Management App PV template copy: src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-pv-server-example.yaml" dest: "{{ template_dir }}/" - - name: Ensure CFME App PV Template is created + - name: Ensure Management App PV Template is created oc_obj: namespace: "{{ openshift_management_project }}" name: "{{ openshift_management_flavor }}-app-pv" @@ -103,7 +103,7 @@ # Required for database if the installation is fully podified - when: openshift_management_app_template in ['miq-template', 'cfme-template'] block: - - name: Check if the CFME DB PV template has been created already + - name: Check if the Management DB PV template has been created already oc_obj: namespace: "{{ openshift_management_project }}" state: list @@ -113,12 +113,12 @@ - when: miq_db_pv_check.results.results == [{}] block: - - name: Copy over CFME DB PV template + - name: Copy over Management DB PV template copy: src: "templates/{{ openshift_management_flavor }}/{{ openshift_management_flavor_short }}-pv-db-example.yaml" dest: "{{ template_dir }}/" - - name: Ensure CFME DB PV Template is created + - name: Ensure Management DB PV Template is created oc_obj: namespace: "{{ openshift_management_project }}" name: "{{ openshift_management_flavor }}-db-pv" diff --git a/roles/openshift_nfs/tasks/create_export.yml b/roles/openshift_nfs/tasks/create_export.yml index 39323904f..b0b888d56 100644 --- a/roles/openshift_nfs/tasks/create_export.yml +++ b/roles/openshift_nfs/tasks/create_export.yml @@ -12,7 +12,7 @@ # l_nfs_export_name: Name of sub-directory of the export # l_nfs_options: Mount Options -- name: Ensure CFME App NFS export directory exists +- name: "Ensure {{ l_nfs_export_name }} NFS export directory exists" file: path: "{{ l_nfs_base_dir }}/{{ l_nfs_export_name }}" state: directory -- cgit v1.2.3 From 7d88f8dada9f19dd6b49af9bb539e43aaa15f138 Mon Sep 17 00:00:00 2001 From: Tim Bielawa Date: Thu, 19 Oct 2017 16:28:53 -0400 Subject: Refactor adding multiple container providers --- roles/openshift_management/README.md | 28 +++++++++---------- .../filter_plugins/oo_management_filters.py | 27 +++++++++++++++++++ .../tasks/add_many_container_providers.yml | 26 +++++++++++++----- 
roles/openshift_management/tasks/main.yml | 4 +++ roles/openshift_management/tasks/storage/nfs.yml | 31 ---------------------- .../tasks/storage/nfs_server.yml | 31 ++++++++++++++++++++++ 6 files changed, 94 insertions(+), 53 deletions(-) create mode 100644 roles/openshift_management/filter_plugins/oo_management_filters.py create mode 100644 roles/openshift_management/tasks/storage/nfs_server.yml (limited to 'roles') diff --git a/roles/openshift_management/README.md b/roles/openshift_management/README.md index fb961e38e..817ad4daa 100644 --- a/roles/openshift_management/README.md +++ b/roles/openshift_management/README.md @@ -482,16 +482,14 @@ preview** script which allows you to add multiple container platforms as container providers in any arbitrary MIQ/CFME server. Using the multiple-provider script requires manual configuration and -the setting of an additional inventory parameter. +setting an `EXTRA_VARS` parameter on the command-line. 1. Copy the [container_providers.yml](files/examples/container_providers.yml) - example somewhere -1. Add the `openshift_management_many_container_providers_config` - parameter to your inventory and set the value to the full path of - the file you copied in `Step 1` -1. Update the `hostname`, `user`, and `password` parameters in the + example somewhere, such as `/tmp/cp.yml` +1. If you changed your CFME/MIQ name or password, update the + `hostname`, `user`, and `password` parameters in the `management_server` key in the `container_providers.yml` file copy 1. Fill in an entry under the `container_providers` key for *each* OCP or Origin cluster you want to add as container providers @@ -511,6 +509,7 @@ the setting of an additional inventory parameter. * `port` - Update this key if your OCP/Origin cluster runs the API on a port other than `8443` * `endpoint` - You may enable SSL verification (`verify_ssl`) or change the validation setting to `ssl-with-validation`. Support for custom trusted CA certificates is not available at this time. + Let's see an example describing the following scenario: * You copied `files/examples/container_providers.yml` to `/tmp/cp.yml` @@ -542,18 +541,15 @@ management_server: password: b3tt3r_p4SSw0rd ``` -Then you would add to your inventory file: - -```ini -[OSEv3:vars] -# ... -openshift_management_many_container_providers_config=/tmp/cp.yml -``` - -Finally, run the many-container-providers integration script: +Then you will run the many-container-providers integration script. You +**must** provide the path to the container providers configuration +file as an `EXTRA_VARS` parameter to `ansible-playbook`. Use the `-e` +(or `--extra-vars`) parameter to set `container_providers_config` to +the config file path. 
``` -$ ansible-playbook -v -i roles/openshift_management/tasks/add_many_container_providers.yml +$ ansible-playbook -v -e container_providers_config=/tmp/cp.yml \ + roles/openshift_management/tasks/add_many_container_providers.yml ``` Afterwards you will find two new container providers in your diff --git a/roles/openshift_management/filter_plugins/oo_management_filters.py b/roles/openshift_management/filter_plugins/oo_management_filters.py new file mode 100644 index 000000000..06bc0796d --- /dev/null +++ b/roles/openshift_management/filter_plugins/oo_management_filters.py @@ -0,0 +1,27 @@ +#!/usr/bin/python +# -*- coding: utf-8 -*- + +def oo_filter_container_providers(results): + """results - the result from posting the API calls for adding new +providers""" + all_results = [] + for result in results: + if 'results' in result['json']: + # We got an OK response + r = result['json']['results'][0] + all_results.append("Provider '{}' - Added successfully".format(r['name'])) + elif 'error' in result['json']: + # This was a problem + all_results.append("Provider '{}' - Failed to add. Message: {}".format( + result['item']['name'], result['json']['error']['message'])) + return all_results + +class FilterModule(object): + """ Custom ansible filter mapping """ + + # pylint: disable=no-self-use, too-few-public-methods + def filters(self): + """ returns a mapping of filters to methods """ + return { + "oo_filter_container_providers": oo_filter_container_providers, + } diff --git a/roles/openshift_management/tasks/add_many_container_providers.yml b/roles/openshift_management/tasks/add_many_container_providers.yml index f92c81277..10a1f4c83 100644 --- a/roles/openshift_management/tasks/add_many_container_providers.yml +++ b/roles/openshift_management/tasks/add_many_container_providers.yml @@ -1,9 +1,15 @@ --- -- hosts: "{{ groups['masters'][0] }}" +- hosts: localhost tasks: + - name: Ensure the container provider configuration is defined + assert: + that: container_providers_config is defined + msg: | + Error: Must provide providers config path. 
Fix: Add '-e container_providers_config=/path/to/your/config' to the ansible-playbook command + - name: Include providers/management configuration include_vars: - file: "{{ openshift_management_many_container_providers_config }}" + file: "{{ container_providers_config }}" - name: Ensure this cluster is a container provider uri: @@ -20,8 +26,16 @@ with_items: "{{ container_providers }}" register: results - - name: Ensure failed additions are reported for each container provider + # TODO: Make this prettier and easier to read + - name: Save results + copy: + dest: /tmp/results.json + content: "{{ results.results | to_nice_json }}" + # state: present + # debug: + # var: item.item + # with_items: "{{ results.results }}" + + - name: print each result debug: - msg: | - FLOOP {{ item.item.hostname }} - with_items: "{{ results.results }}" + msg: "{{ results.results | oo_filter_container_providers }}" diff --git a/roles/openshift_management/tasks/main.yml b/roles/openshift_management/tasks/main.yml index 88290c44d..9be923a57 100644 --- a/roles/openshift_management/tasks/main.yml +++ b/roles/openshift_management/tasks/main.yml @@ -22,6 +22,10 @@ ###################################################################### # STORAGE - Initialize basic storage class +- name: Determine the correct NFS host if required + include: storage/nfs_server.yml + when: openshift_management_storage_class in ['nfs', 'nfs_external'] + #--------------------------------------------------------------------- # * nfs - set up NFS shares on the first master for a proof of concept - name: Create required NFS exports for Management app storage diff --git a/roles/openshift_management/tasks/storage/nfs.yml b/roles/openshift_management/tasks/storage/nfs.yml index 696808328..94e11137c 100644 --- a/roles/openshift_management/tasks/storage/nfs.yml +++ b/roles/openshift_management/tasks/storage/nfs.yml @@ -2,37 +2,6 @@ # Tasks to statically provision NFS volumes # Include if not using dynamic volume provisioning -- name: Ensure we save the local NFS server if one is provided - set_fact: - openshift_management_nfs_server: "{{ openshift_management_storage_nfs_local_hostname }}" - when: - - openshift_management_storage_nfs_local_hostname is defined - - openshift_management_storage_nfs_local_hostname != False - - openshift_management_storage_class == "nfs" - -- name: Ensure we save the local NFS server - set_fact: - openshift_management_nfs_server: "{{ groups['oo_nfs_to_config'].0 }}" - when: - - openshift_management_nfs_server is not defined - - openshift_management_storage_class == "nfs" - -- name: Ensure we save the external NFS server - set_fact: - openshift_management_nfs_server: "{{ openshift_management_storage_nfs_external_hostname }}" - when: - - openshift_management_storage_class == "nfs_external" - -- name: Failed NFS server detection - assert: - that: - - openshift_management_nfs_server is defined - msg: | - "Unable to detect an NFS server. The 'nfs_external' - openshift_management_storage_class option requires that you set - openshift_management_storage_nfs_external_hostname. 
NFS hosts detected - for local nfs services: {{ groups['oo_nfs_to_config'] | join(', ') }}" - - name: Setting up NFS storage block: - name: Include the NFS Setup role tasks diff --git a/roles/openshift_management/tasks/storage/nfs_server.yml b/roles/openshift_management/tasks/storage/nfs_server.yml new file mode 100644 index 000000000..96a742c83 --- /dev/null +++ b/roles/openshift_management/tasks/storage/nfs_server.yml @@ -0,0 +1,31 @@ +--- +- name: Ensure we save the local NFS server if one is provided + set_fact: + openshift_management_nfs_server: "{{ openshift_management_storage_nfs_local_hostname }}" + when: + - openshift_management_storage_nfs_local_hostname is defined + - openshift_management_storage_nfs_local_hostname != False + - openshift_management_storage_class == "nfs" + +- name: Ensure we save the local NFS server + set_fact: + openshift_management_nfs_server: "{{ groups['oo_nfs_to_config'].0 }}" + when: + - openshift_management_nfs_server is not defined + - openshift_management_storage_class == "nfs" + +- name: Ensure we save the external NFS server + set_fact: + openshift_management_nfs_server: "{{ openshift_management_storage_nfs_external_hostname }}" + when: + - openshift_management_storage_class == "nfs_external" + +- name: Failed NFS server detection + assert: + that: + - openshift_management_nfs_server is defined + msg: | + "Unable to detect an NFS server. The 'nfs_external' + openshift_management_storage_class option requires that you set + openshift_management_storage_nfs_external_hostname. NFS hosts detected + for local nfs services: {{ groups['oo_nfs_to_config'] | join(', ') }}" -- cgit v1.2.3 From 6c3a0373e1a48f730e9a279a255c0f1a58613cc7 Mon Sep 17 00:00:00 2001 From: Andrew Butcher Date: Fri, 20 Oct 2017 15:44:11 -0400 Subject: Move add_many_container_providers.yml to playbooks/byo/openshift-management with a noop task include to load filter plugins. --- roles/openshift_management/README.md | 2 +- .../tasks/add_many_container_providers.yml | 41 ---------------------- roles/openshift_management/tasks/noop.yml | 1 + 3 files changed, 2 insertions(+), 42 deletions(-) delete mode 100644 roles/openshift_management/tasks/add_many_container_providers.yml create mode 100644 roles/openshift_management/tasks/noop.yml (limited to 'roles') diff --git a/roles/openshift_management/README.md b/roles/openshift_management/README.md index 817ad4daa..05ca27913 100644 --- a/roles/openshift_management/README.md +++ b/roles/openshift_management/README.md @@ -549,7 +549,7 @@ the config file path. ``` $ ansible-playbook -v -e container_providers_config=/tmp/cp.yml \ - roles/openshift_management/tasks/add_many_container_providers.yml + playbooks/byo/openshift-management/add_many_container_providers.yml ``` Afterwards you will find two new container providers in your diff --git a/roles/openshift_management/tasks/add_many_container_providers.yml b/roles/openshift_management/tasks/add_many_container_providers.yml deleted file mode 100644 index 10a1f4c83..000000000 --- a/roles/openshift_management/tasks/add_many_container_providers.yml +++ /dev/null @@ -1,41 +0,0 @@ ---- -- hosts: localhost - tasks: - - name: Ensure the container provider configuration is defined - assert: - that: container_providers_config is defined - msg: | - Error: Must provide providers config path. 
Fix: Add '-e container_providers_config=/path/to/your/config' to the ansible-playbook command - - - name: Include providers/management configuration - include_vars: - file: "{{ container_providers_config }}" - - - name: Ensure this cluster is a container provider - uri: - url: "https://{{ management_server['hostname'] }}/api/providers" - body_format: json - method: POST - user: "{{ management_server['user'] }}" - password: "{{ management_server['password'] }}" - validate_certs: no - # Docs on formatting the BODY of the POST request: - # http://manageiq.org/docs/reference/latest/api/reference/providers.html#specifying-connection-configurations - body: "{{ item }}" - failed_when: false - with_items: "{{ container_providers }}" - register: results - - # TODO: Make this prettier and easier to read - - name: Save results - copy: - dest: /tmp/results.json - content: "{{ results.results | to_nice_json }}" - # state: present - # debug: - # var: item.item - # with_items: "{{ results.results }}" - - - name: print each result - debug: - msg: "{{ results.results | oo_filter_container_providers }}" diff --git a/roles/openshift_management/tasks/noop.yml b/roles/openshift_management/tasks/noop.yml new file mode 100644 index 000000000..ed97d539c --- /dev/null +++ b/roles/openshift_management/tasks/noop.yml @@ -0,0 +1 @@ +--- -- cgit v1.2.3 From ac62ea0066934877f94e99bda6ec53a9c03ababb Mon Sep 17 00:00:00 2001 From: Tim Bielawa Date: Fri, 20 Oct 2017 16:17:41 -0400 Subject: Fix lint --- .../openshift_management/filter_plugins/oo_management_filters.py | 9 +++++++-- 1 file changed, 7 insertions(+), 2 deletions(-) (limited to 'roles') diff --git a/roles/openshift_management/filter_plugins/oo_management_filters.py b/roles/openshift_management/filter_plugins/oo_management_filters.py index 06bc0796d..3b7013d9a 100644 --- a/roles/openshift_management/filter_plugins/oo_management_filters.py +++ b/roles/openshift_management/filter_plugins/oo_management_filters.py @@ -1,5 +1,9 @@ #!/usr/bin/python # -*- coding: utf-8 -*- +""" +Filter methods for the management role +""" + def oo_filter_container_providers(results): """results - the result from posting the API calls for adding new @@ -8,14 +12,15 @@ providers""" for result in results: if 'results' in result['json']: # We got an OK response - r = result['json']['results'][0] - all_results.append("Provider '{}' - Added successfully".format(r['name'])) + res = result['json']['results'][0] + all_results.append("Provider '{}' - Added successfully".format(res['name'])) elif 'error' in result['json']: # This was a problem all_results.append("Provider '{}' - Failed to add. Message: {}".format( result['item']['name'], result['json']['error']['message'])) return all_results + class FilterModule(object): """ Custom ansible filter mapping """ -- cgit v1.2.3
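To make the behavior of the final `oo_filter_container_providers`
filter concrete, here is a minimal sketch. It is not part of any
commit above; the playbook wrapper, the fabricated API responses, and
the plugin-path setup are all assumptions for illustration:

```yaml
---
# Run with the role's filter plugins on Ansible's plugin path, e.g.:
#   ANSIBLE_FILTER_PLUGINS=roles/openshift_management/filter_plugins \
#     ansible-playbook filter_demo.yml
- hosts: localhost
  gather_facts: false
  vars:
    fake_results:
      # Shape of a successful POST to /api/providers
      - json:
          results:
            - name: "OCP Production"
      # Shape of a failed POST; the provider name comes from the loop item
      - item:
          name: "OCP Testing"
        json:
          error:
            message: "Could not authenticate"
  tasks:
    - name: Show the summary messages the playbook would print
      debug:
        msg: "{{ fake_results | oo_filter_container_providers }}"
```

With these inputs the `debug` task prints one "Added successfully"
line for `OCP Production` and one "Failed to add" line for `OCP
Testing`, matching the strings built by the filter.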