Diffstat (limited to 'playbooks/openstack')
19 files changed, 118 insertions, 533 deletions
diff --git a/playbooks/openstack/README.md b/playbooks/openstack/README.md
index f3d5b5aa8..875004cc9 100644
--- a/playbooks/openstack/README.md
+++ b/playbooks/openstack/README.md
@@ -38,6 +38,19 @@ Optional:
 
 * External Neutron network with a floating IP address pool
 
+## DNS Requirements
+
+OpenShift requires DNS to operate properly. OpenStack supports DNS-as-a-service
+in the form of the Designate project, but the playbooks here don't support it
+yet. Until we do (or if you are not running Designate once we do), you will
+need to provide a DNS solution yourself.
+
+If your DNS server supports nsupdate, we will use it to add the necessary records.
+
+TODO(shadower): describe how to build a sample DNS server and how to configure
+our playbooks for nsupdate.
+
+
 ## Installation
 
 There are four main parts to the installation:
@@ -143,6 +156,8 @@ $ vi inventory/group_vars/all.yml
 4. Set the `openstack_default_flavor` to the flavor you want your
    OpenShift VMs to use.
    - See `openstack flavor list` for the list of available flavors.
+5. Set the `public_dns_nameservers` to the list of IP addresses of the DNS
+   servers used for **private** address resolution[1].
 
 **NOTE**: In most OpenStack environments, you will also need to
 configure the forwarders for the DNS server we create. This depends on
@@ -153,6 +168,9 @@ put the IP addresses into `public_dns_nameservers` in
 `inventory/group_vars/all.yml`.
 
+[1]: Yes, the name is bad. We will fix it.
+
+
 #### OpenShift configuration
 
 The OpenShift configuration is in `inventory/group_vars/OSEv3.yml`.
diff --git a/playbooks/openstack/galaxy-requirements.yaml b/playbooks/openstack/galaxy-requirements.yaml
deleted file mode 100644
index 1d745dcc3..000000000
--- a/playbooks/openstack/galaxy-requirements.yaml
+++ /dev/null
@@ -1,10 +0,0 @@
----
-# This is the Ansible Galaxy requirements file to pull in the correct roles
-
-# From 'infra-ansible'
-- src: https://github.com/redhat-cop/infra-ansible
-  version: master
-
-# From 'openshift-ansible'
-- src: https://github.com/openshift/openshift-ansible
-  version: master
diff --git a/playbooks/openstack/openshift-cluster/custom_flavor_check.yaml b/playbooks/openstack/openshift-cluster/custom_flavor_check.yaml
deleted file mode 100644
index e11874c28..000000000
--- a/playbooks/openstack/openshift-cluster/custom_flavor_check.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-- name: Try to get flavor facts
-  os_flavor_facts:
-    name: "{{ flavor }}"
-  register: flavor_result
-- name: Check that custom flavor is available
-  assert:
-    that: "flavor_result.ansible_facts.openstack_flavors"
-    msg: "Flavor {{ flavor }} is not available."
diff --git a/playbooks/openstack/openshift-cluster/custom_image_check.yaml b/playbooks/openstack/openshift-cluster/custom_image_check.yaml
deleted file mode 100644
index 452e1e4d8..000000000
--- a/playbooks/openstack/openshift-cluster/custom_image_check.yaml
+++ /dev/null
@@ -1,9 +0,0 @@
----
-- name: Try to get image facts
-  os_image_facts:
-    image: "{{ image }}"
-  register: image_result
-- name: Check that custom image is available
-  assert:
-    that: "image_result.ansible_facts.openstack_image"
-    msg: "Image {{ image }} is not available."
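Editor's note: the README steps above all end up in `inventory/group_vars/all.yml`. A minimal sketch of what that file might contain after following them — the variable names come from the README and the sample inventory in this repository, but every value below is a made-up placeholder:

    ---
    # inventory/group_vars/all.yml (hypothetical values)
    openstack_default_image_name: centos-7-cloud       # Glance image used for the VMs
    openstack_default_flavor: m1.medium                # see `openstack flavor list`
    openstack_ssh_public_key: openshift-key            # name of an existing Nova keypair
    openstack_external_network_name: public            # network providing the floating IP pool
    public_dns_domain: example.com
    # DNS servers used for the *private* address resolution (note [1] above)
    public_dns_nameservers:
      - 10.0.0.2
      - 10.0.0.3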
diff --git a/playbooks/openstack/openshift-cluster/install.yml b/playbooks/openstack/openshift-cluster/install.yml
new file mode 100644
index 000000000..40d4767ba
--- /dev/null
+++ b/playbooks/openstack/openshift-cluster/install.yml
@@ -0,0 +1,18 @@
+---
+# NOTE(shadower): the AWS playbook builds an in-memory inventory of
+# all the EC2 instances here. We don't need to as that's done by the
+# dynamic inventory.
+
+# TODO(shadower): the AWS playbook sets the
+# `openshift_master_cluster_hostname` and `osm_custom_cors_origins`
+# values here. We do it in the OSEv3 group vars. Do we need to add
+# some logic here?
+
+- name: normalize groups
+  include: ../../byo/openshift-cluster/initialize_groups.yml
+
+- name: run the std_include
+  include: ../../common/openshift-cluster/std_include.yml
+
+- name: run the config
+  include: ../../common/openshift-cluster/config.yml
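Editor's note: the TODO above says `openshift_master_cluster_hostname` and `osm_custom_cors_origins` are expected to come from the OSEv3 group vars rather than from this playbook. A hypothetical `inventory/group_vars/OSEv3.yml` fragment illustrating that (the hostnames are placeholders):

    ---
    # inventory/group_vars/OSEv3.yml (hypothetical values)
    openshift_master_cluster_hostname: console.openshift.example.com
    osm_custom_cors_origins:
      - console.openshift.example.com
      - 127.0.0.1
      - localhost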
diff --git a/playbooks/openstack/openshift-cluster/net_vars_check.yaml b/playbooks/openstack/openshift-cluster/net_vars_check.yaml
deleted file mode 100644
index 68afde415..000000000
--- a/playbooks/openstack/openshift-cluster/net_vars_check.yaml
+++ /dev/null
@@ -1,14 +0,0 @@
----
-- name: Check the provider network configuration
-  fail:
-    msg: "Flannel SDN requires a dedicated containers data network and can not work over a provider network"
-  when:
-    - openstack_provider_network_name is defined
-    - openstack_private_data_network_name is defined
-
-- name: Check the flannel network configuration
-  fail:
-    msg: "A dedicated containers data network is only supported with Flannel SDN"
-  when:
-    - openstack_private_data_network_name is defined
-    - not openshift_use_flannel|default(False)|bool
diff --git a/playbooks/openstack/openshift-cluster/post-install.yml b/playbooks/openstack/openshift-cluster/post-install.yml
index 417813e2a..7b1744a18 100644
--- a/playbooks/openstack/openshift-cluster/post-install.yml
+++ b/playbooks/openstack/openshift-cluster/post-install.yml
@@ -22,9 +22,9 @@
     - when: openshift_use_flannel|default(False)|bool
       block:
         - include_role:
-            name: openshift-ansible/roles/os_firewall
+            name: os_firewall
         - include_role:
-            name: openshift-ansible/roles/lib_os_firewall
+            name: lib_os_firewall
         - name: set allow rules for dnsmasq
           os_firewall_manage_iptables:
             name: "{{ item.service }}"
diff --git a/playbooks/openstack/openshift-cluster/post-provision-openstack.yml b/playbooks/openstack/openshift-cluster/post-provision-openstack.yml
deleted file mode 100644
index e460fbf12..000000000
--- a/playbooks/openstack/openshift-cluster/post-provision-openstack.yml
+++ /dev/null
@@ -1,118 +0,0 @@
----
-- hosts: cluster_hosts
-  name: Wait for the the nodes to come up
-  become: False
-  gather_facts: False
-  tasks:
-    - when: not openstack_use_bastion|default(False)|bool
-      wait_for_connection:
-    - when: openstack_use_bastion|default(False)|bool
-      delegate_to: bastion
-      wait_for_connection:
-
-- hosts: cluster_hosts
-  gather_facts: True
-  tasks:
-    - name: Debug hostvar
-      debug:
-        msg: "{{ hostvars[inventory_hostname] }}"
-        verbosity: 2
-
-- name: OpenShift Pre-Requisites (part 1)
-  include: pre-install.yml
-
-- name: Assign hostnames
-  hosts: cluster_hosts
-  gather_facts: False
-  become: true
-  roles:
-    - role: hostnames
-
-- name: Subscribe DNS Host to allow for configuration below
-  hosts: dns
-  gather_facts: False
-  become: true
-  roles:
-    - role: subscription-manager
-      when: hostvars.localhost.rhsm_register|default(False)
-      tags: 'subscription-manager'
-
-- name: Determine which DNS server(s) to use for our generated records
-  hosts: localhost
-  gather_facts: False
-  become: False
-  roles:
-    - dns-server-detect
-
-- name: Build the DNS Server Views and Configure DNS Server(s)
-  hosts: dns
-  gather_facts: False
-  become: true
-  roles:
-    - role: dns-views
-    - role: infra-ansible/roles/dns-server
-
-- name: Build and process DNS Records
-  hosts: localhost
-  gather_facts: True
-  become: False
-  roles:
-    - role: dns-records
-      use_bastion: "{{ openstack_use_bastion|default(False)|bool }}"
-    - role: infra-ansible/roles/dns
-
-- name: Switch the stack subnet to the configured private DNS server
-  hosts: localhost
-  gather_facts: False
-  become: False
-  vars_files:
-    - stack_params.yaml
-  tasks:
-    - include_role:
-        name: openstack-stack
-        tasks_from: subnet_update_dns_servers
-
-- name: OpenShift Pre-Requisites (part 2)
-  hosts: OSEv3
-  gather_facts: true
-  become: true
-  vars:
-    interface: "{{ flannel_interface|default('eth1') }}"
-    interface_file: /etc/sysconfig/network-scripts/ifcfg-{{ interface }}
-    interface_config:
-      DEVICE: "{{ interface }}"
-      TYPE: Ethernet
-      BOOTPROTO: dhcp
-      ONBOOT: 'yes'
-      DEFTROUTE: 'no'
-      PEERDNS: 'no'
-  pre_tasks:
-    - name: "Include DNS configuration to ensure proper name resolution"
-      lineinfile:
-        state: present
-        dest: /etc/sysconfig/network
-        regexp: "IP4_NAMESERVERS={{ hostvars['localhost'].private_dns_server }}"
-        line: "IP4_NAMESERVERS={{ hostvars['localhost'].private_dns_server }}"
-    - name: "Configure the flannel interface options"
-      when: openshift_use_flannel|default(False)|bool
-      block:
-        - file:
-            dest: "{{ interface_file }}"
-            state: touch
-            mode: 0644
-            owner: root
-            group: root
-        - lineinfile:
-            state: present
-            dest: "{{ interface_file }}"
-            regexp: "{{ item.key }}="
-            line: "{{ item.key }}={{ item.value }}"
-          with_dict: "{{ interface_config }}"
-  roles:
-    - node-network-manager
-
-- include: prepare-and-format-cinder-volume.yaml
-  when: >
-    prepare_and_format_registry_volume|default(False) or
-    (cinder_registry_volume is defined and
-      cinder_registry_volume.changed|default(False))
diff --git a/playbooks/openstack/openshift-cluster/pre-install.yml b/playbooks/openstack/openshift-cluster/pre-install.yml
deleted file mode 100644
index c9f333b92..000000000
--- a/playbooks/openstack/openshift-cluster/pre-install.yml
+++ /dev/null
@@ -1,21 +0,0 @@
----
-###############################
-# OpenShift Pre-Requisites
-
-# - subscribe hosts
-# - prepare docker
-# - other prep (install additional packages, etc.)
-#
-- hosts: OSEv3
-  become: true
-  roles:
-    - { role: subscription-manager, when: hostvars.localhost.rhsm_register|default(False), tags: 'subscription-manager', ansible_sudo: true }
-    - role: docker-storage-setup
-      docker_dev: /dev/vdb
-      tags: 'docker'
-    - { role: openshift-prep, tags: 'openshift-prep' }
-
-- hosts: localhost:cluster_hosts
-  become: False
-  tasks:
-    - include: pre_tasks.yml
diff --git a/playbooks/openstack/openshift-cluster/pre_tasks.yml b/playbooks/openstack/openshift-cluster/pre_tasks.yml
deleted file mode 100644
index 11fe2dd84..000000000
--- a/playbooks/openstack/openshift-cluster/pre_tasks.yml
+++ /dev/null
@@ -1,53 +0,0 @@
----
-- name: Generate Environment ID
-  set_fact:
-    env_random_id: "{{ ansible_date_time.epoch }}"
-  run_once: true
-  delegate_to: localhost
-
-- name: Set default Environment ID
-  set_fact:
-    default_env_id: "openshift-{{ lookup('env','OS_USERNAME') }}-{{ env_random_id }}"
-  delegate_to: localhost
-
-- name: Setting Common Facts
-  set_fact:
-    env_id: "{{ env_id | default(default_env_id) }}"
-  delegate_to: localhost
-
-- name: Updating DNS domain to include env_id (if not empty)
-  set_fact:
-    full_dns_domain: "{{ (env_id|trim == '') | ternary(public_dns_domain, env_id + '.' + public_dns_domain) }}"
-  delegate_to: localhost
-
-- name: Set the APP domain for OpenShift use
-  set_fact:
-    openshift_app_domain: "{{ openshift_app_domain | default('apps') }}"
-  delegate_to: localhost
-
-- name: Set the default app domain for routing purposes
-  set_fact:
-    openshift_master_default_subdomain: "{{ openshift_app_domain }}.{{ full_dns_domain }}"
-  delegate_to: localhost
-  when:
-  - openshift_master_default_subdomain is undefined
-
-# Check that openshift_cluster_node_labels has regions defined for all groups
-# NOTE(kpilatov): if node labels are to be enabled for more groups,
-#                 this check needs to be modified as well
-- name: Set openshift_cluster_node_labels if undefined (should not happen)
-  set_fact:
-    openshift_cluster_node_labels: {'app': {'region': 'primary'}, 'infra': {'region': 'infra'}}
-  when: openshift_cluster_node_labels is not defined
-
-- name: Set openshift_cluster_node_labels for the infra group
-  set_fact:
-    openshift_cluster_node_labels: "{{ openshift_cluster_node_labels | combine({'infra': {'region': 'infra'}}, recursive=True) }}"
-
-- name: Set openshift_cluster_node_labels for the app group
-  set_fact:
-    openshift_cluster_node_labels: "{{ openshift_cluster_node_labels | combine({'app': {'region': 'primary'}}, recursive=True) }}"
-
-- name: Set openshift_cluster_node_labels for auto-scaling app nodes
-  set_fact:
-    openshift_cluster_node_labels: "{{ openshift_cluster_node_labels | combine({'app': {'autoscaling': 'app'}}, recursive=True) }}"
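Editor's note: to make the environment-ID and DNS-domain facts in the deleted pre_tasks.yml easier to follow, here is a worked example with purely hypothetical values:

    # Hypothetical inputs and the facts the deleted tasks derive from them:
    env_id: tst
    public_dns_domain: example.com
    openshift_app_domain: apps
    # => full_dns_domain: tst.example.com
    # => openshift_master_default_subdomain: apps.tst.example.com
    # (an empty env_id would leave full_dns_domain as just example.com)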
diff --git a/playbooks/openstack/openshift-cluster/prepare-and-format-cinder-volume.yaml b/playbooks/openstack/openshift-cluster/prepare-and-format-cinder-volume.yaml
deleted file mode 100644
index 30e094459..000000000
--- a/playbooks/openstack/openshift-cluster/prepare-and-format-cinder-volume.yaml
+++ /dev/null
@@ -1,67 +0,0 @@
----
-- hosts: localhost
-  gather_facts: False
-  become: False
-  tasks:
-  - set_fact:
-      cinder_volume: "{{ hostvars[groups.masters[0]].openshift_hosted_registry_storage_openstack_volumeID }}"
-      cinder_fs: "{{ hostvars[groups.masters[0]].openshift_hosted_registry_storage_openstack_filesystem }}"
-
-  - name: Attach the volume to the VM
-    os_server_volume:
-      state: present
-      server: "{{ groups['masters'][0] }}"
-      volume: "{{ cinder_volume }}"
-    register: volume_attachment
-
-  - set_fact:
-      attached_device: >-
-        {{ volume_attachment['attachments']|json_query("[?volume_id=='" + cinder_volume + "'].device | [0]") }}
-
-  - delegate_to: "{{ groups['masters'][0] }}"
-    block:
-    - name: Wait for the device to appear
-      wait_for: path={{ attached_device }}
-
-    - name: Create a temp directory for mounting the volume
-      tempfile:
-        prefix: cinder-volume
-        state: directory
-      register: cinder_mount_dir
-
-    - name: Format the device
-      filesystem:
-        fstype: "{{ cinder_fs }}"
-        dev: "{{ attached_device }}"
-
-    - name: Mount the device
-      mount:
-        name: "{{ cinder_mount_dir.path }}"
-        src: "{{ attached_device }}"
-        state: mounted
-        fstype: "{{ cinder_fs }}"
-
-    - name: Change mode on the filesystem
-      file:
-        path: "{{ cinder_mount_dir.path }}"
-        state: directory
-        recurse: true
-        mode: 0777
-
-    - name: Unmount the device
-      mount:
-        name: "{{ cinder_mount_dir.path }}"
-        src: "{{ attached_device }}"
-        state: absent
-        fstype: "{{ cinder_fs }}"
-
-    - name: Delete the temp directory
-      file:
-        name: "{{ cinder_mount_dir.path }}"
-        state: absent
-
-  - name: Detach the volume from the VM
-    os_server_volume:
-      state: absent
-      server: "{{ groups['masters'][0] }}"
-      volume: "{{ cinder_volume }}"
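Editor's note: the playbook above reads its parameters from the first master's host variables and is only included when a registry volume was requested. A hypothetical group vars fragment (for example in `inventory/group_vars/OSEv3.yml`) showing the variables it references — the volume ID is a placeholder:

    ---
    # Hypothetical values; the variable names are the ones used above
    prepare_and_format_registry_volume: true
    openshift_hosted_registry_storage_openstack_volumeID: 1df8ad16-1cd8-4ef1-9d8b-3ec42dbc2fa4
    openshift_hosted_registry_storage_openstack_filesystem: xfs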
diff --git a/playbooks/openstack/openshift-cluster/prerequisites.yml b/playbooks/openstack/openshift-cluster/prerequisites.yml
index 11a31411e..0356b37dd 100644
--- a/playbooks/openstack/openshift-cluster/prerequisites.yml
+++ b/playbooks/openstack/openshift-cluster/prerequisites.yml
@@ -1,123 +1,12 @@
 ---
 - hosts: localhost
   tasks:
-
-  # Sanity check of inventory variables
-  - include: net_vars_check.yaml
-
-  # Check ansible
-  - name: Check Ansible version
-    assert:
-      that: >
-        (ansible_version.major == 2 and ansible_version.minor >= 3) or
-        (ansible_version.major > 2)
-      msg: "Ansible version must be at least 2.3"
-
-  # Check shade
-  - name: Try to import python module shade
-    command: python -c "import shade"
-    ignore_errors: yes
-    register: shade_result
-  - name: Check if shade is installed
-    assert:
-      that: 'shade_result.rc == 0'
-      msg: "Python module shade is not installed"
-
-  # Check jmespath
-  - name: Try to import python module shade
-    command: python -c "import jmespath"
-    ignore_errors: yes
-    register: jmespath_result
-  - name: Check if jmespath is installed
-    assert:
-      that: 'jmespath_result.rc == 0'
-      msg: "Python module jmespath is not installed"
-
-  # Check python-dns
-  - name: Try to import python DNS module
-    command: python -c "import dns"
-    ignore_errors: yes
-    register: pythondns_result
-  - name: Check if python-dns is installed
-    assert:
-      that: 'pythondns_result.rc == 0'
-      msg: "Python module python-dns is not installed"
-
-  # Check jinja2
-  - name: Try to import jinja2 module
-    command: python -c "import jinja2"
-    ignore_errors: yes
-    register: jinja_result
-  - name: Check if jinja2 is installed
-    assert:
-      that: 'jinja_result.rc == 0'
-      msg: "Python module jinja2 is not installed"
-
-  # Check Glance image
-  - name: Try to get image facts
-    os_image_facts:
-      image: "{{ openstack_default_image_name }}"
-    register: image_result
-  - name: Check that image is available
-    assert:
-      that: "image_result.ansible_facts.openstack_image"
-      msg: "Image {{ openstack_default_image_name }} is not available"
-
-  # Check network name
-  - name: Try to get network facts
-    os_networks_facts:
-      name: "{{ openstack_external_network_name }}"
-    register: network_result
-    when: not openstack_provider_network_name|default(None)
-  - name: Check that network is available
-    assert:
-      that: "network_result.ansible_facts.openstack_networks"
-      msg: "Network {{ openstack_external_network_name }} is not available"
-    when: not openstack_provider_network_name|default(None)
-
-  # Check keypair
-  # TODO kpilatov: there is no Ansible module for getting OS keypairs
-  #                (os_keypair is not suitable for this)
-  #                this method does not force python-openstackclient dependency
-  - name: Try to show keypair
-    command: >
-             python -c 'import shade; cloud = shade.openstack_cloud();
-             exit(cloud.get_keypair("{{ openstack_ssh_public_key }}") is None)'
-    ignore_errors: yes
-    register: key_result
-  - name: Check that keypair is available
-    assert:
-      that: 'key_result.rc == 0'
-      msg: "Keypair {{ openstack_ssh_public_key }} is not available"
-
-# Check that custom images and flavors exist
-- hosts: localhost
-
-  # Include variables that will be used by heat
-  vars_files:
-  - stack_params.yaml
-
-  tasks:
-  # Check that custom images are available
-  - include: custom_image_check.yaml
-    with_items:
-    - "{{ openstack_master_image }}"
-    - "{{ openstack_infra_image }}"
-    - "{{ openstack_node_image }}"
-    - "{{ openstack_lb_image }}"
-    - "{{ openstack_etcd_image }}"
-    - "{{ openstack_dns_image }}"
-    loop_control:
-      loop_var: image
-
-  # Check that custom flavors are available
-  - include: custom_flavor_check.yaml
-    with_items:
-    - "{{ master_flavor }}"
-    - "{{ infra_flavor }}"
-    - "{{ node_flavor }}"
-    - "{{ lb_flavor }}"
-    - "{{ etcd_flavor }}"
-    - "{{ dns_flavor }}"
-    loop_control:
-      loop_var: flavor
+  - name: Check dependencies and OpenStack prerequisites
+    include_role:
+      name: openshift_openstack
+      tasks_from: check-prerequisites.yml
+
+  - name: Check network configuration
+    include_role:
+      name: openshift_openstack
+      tasks_from: net_vars_check.yaml
diff --git a/playbooks/openstack/openshift-cluster/provision-openstack.yml b/playbooks/openstack/openshift-cluster/provision-openstack.yml
deleted file mode 100644
index bf424676d..000000000
--- a/playbooks/openstack/openshift-cluster/provision-openstack.yml
+++ /dev/null
@@ -1,35 +0,0 @@
----
-- hosts: localhost
-  gather_facts: True
-  become: False
-  vars_files:
-    - stack_params.yaml
-  pre_tasks:
-    - include: pre_tasks.yml
-  roles:
-    - role: openstack-stack
-    - role: openstack-create-cinder-registry
-      when:
-        - cinder_hosted_registry_name is defined
-        - cinder_hosted_registry_size_gb is defined
-    - role: static_inventory
-      when: openstack_inventory|default('static') == 'static'
-      inventory_path: "{{ openstack_inventory_path|default(inventory_dir) }}"
-      private_ssh_key: "{{ openstack_private_ssh_key|default('') }}"
-      ssh_config_path: "{{ openstack_ssh_config_path|default('/tmp/ssh.config.openshift.ansible' + '.' + stack_name) }}"
-      ssh_user: "{{ ansible_user }}"
-
-- name: Refresh Server inventory or exit to apply SSH config
-  hosts: localhost
-  connection: local
-  become: False
-  gather_facts: False
-  tasks:
-    - name: Exit to apply SSH config for a bastion
-      meta: end_play
-      when: openstack_use_bastion|default(False)|bool
-    - name: Refresh Server inventory
-      meta: refresh_inventory
-
-- include: post-provision-openstack.yml
-  when: not openstack_use_bastion|default(False)|bool
diff --git a/playbooks/openstack/openshift-cluster/provision.yaml b/playbooks/openstack/openshift-cluster/provision.yaml
deleted file mode 100644
index 474c9c803..000000000
--- a/playbooks/openstack/openshift-cluster/provision.yaml
+++ /dev/null
@@ -1,4 +0,0 @@
----
-- include: "prerequisites.yml"
-
-- include: "provision-openstack.yml"
diff --git a/playbooks/openstack/openshift-cluster/provision.yml b/playbooks/openstack/openshift-cluster/provision.yml
new file mode 100644
index 000000000..5b20d5720
--- /dev/null
+++ b/playbooks/openstack/openshift-cluster/provision.yml
@@ -0,0 +1,37 @@
+---
+- name: Create the OpenStack resources for cluster installation
+  hosts: localhost
+  tasks:
+  - name: provision cluster
+    include_role:
+      name: openshift_openstack
+      tasks_from: provision.yml
+
+# NOTE(shadower): the (internal) DNS must be functional at this point!!
+# That will have happened in provision.yml if nsupdate was configured.
+
+# TODO(shadower): consider splitting this up so people can stop here
+# and configure their DNS if they have to.
+
+- name: Prepare the Nodes in the cluster for installation
+  hosts: cluster_hosts
+  become: true
+  # NOTE: The nodes may not be up yet, don't gather facts here.
+  # They'll be collected after `wait_for_connection`.
+  gather_facts: no
+  tasks:
+  - name: Wait for the nodes to come up
+    wait_for_connection:
+
+  - name: Gather facts for the new nodes
+    setup:
+
+  - name: Install dependencies
+    include_role:
+      name: openshift_openstack
+      tasks_from: node-packages.yml
+
+  - name: Configure Node
+    include_role:
+      name: openshift_openstack
+      tasks_from: node-configuration.yml
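Editor's note: the NOTE in provision.yml above stresses that the internal DNS must already resolve the cluster hosts before install.yml runs. A hypothetical sanity-check play (not part of this change) that could be run between provisioning and installation, assuming the nodes are reachable over SSH:

    ---
    - name: Verify that the cluster hosts resolve through the configured DNS
      hosts: cluster_hosts
      gather_facts: no
      tasks:
        - name: Look up this node's hostname on the node itself
          command: "getent hosts {{ inventory_hostname }}"
          changed_when: false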
diff --git a/playbooks/openstack/openshift-cluster/provision_install.yml b/playbooks/openstack/openshift-cluster/provision_install.yml
new file mode 100644
index 000000000..5d88c105f
--- /dev/null
+++ b/playbooks/openstack/openshift-cluster/provision_install.yml
@@ -0,0 +1,9 @@
+---
+- name: Check the prerequisites for cluster provisioning in OpenStack
+  include: prerequisites.yml
+
+- name: Include the provision.yml playbook to create the cluster
+  include: provision.yml
+
+- name: Include the install.yml playbook to install the cluster
+  include: install.yml
diff --git a/playbooks/openstack/openshift-cluster/scale-up.yaml b/playbooks/openstack/openshift-cluster/scale-up.yaml
index 79fc09050..f99ff1349 100644
--- a/playbooks/openstack/openshift-cluster/scale-up.yaml
+++ b/playbooks/openstack/openshift-cluster/scale-up.yaml
@@ -41,21 +41,16 @@
       openstack_num_nodes: "{{ oc_old_num_nodes | int + increment_by | int }}"
 
 # Run provision.yaml with higher number of nodes to create a new app-node VM
-- include: provision.yaml
+- include: provision.yml
 
 # Run config.yml to perform openshift installation
-# Path to openshift-ansible can be customised:
-# - the value of openshift_ansible_dir has to be an absolute path
-# - the path cannot contain the '/' symbol at the end
 
 # Creating a new deployment by the full installation
-- include: "{{ openshift_ansible_dir }}/playbooks/byo/config.yml"
-  vars:
-    openshift_ansible_dir: ../../../../openshift-ansible
+- include: install.yml
   when: 'not groups["new_nodes"] | list'
 
 # Scaling up existing deployment
-- include: "{{ openshift_ansible_dir }}/playbooks/byo/openshift-node/scaleup.yml"
+- include: "../../byo/openshift-node/scaleup.yml"
   vars:
     openshift_ansible_dir: ../../../../openshift-ansible
   when: 'groups["new_nodes"] | list'
diff --git a/playbooks/openstack/openshift-cluster/stack_params.yaml b/playbooks/openstack/openshift-cluster/stack_params.yaml
deleted file mode 100644
index a4da31bfe..000000000
--- a/playbooks/openstack/openshift-cluster/stack_params.yaml
+++ /dev/null
@@ -1,49 +0,0 @@
----
-stack_name: "{{ env_id }}.{{ public_dns_domain }}"
-dns_domain: "{{ public_dns_domain }}"
-dns_nameservers: "{{ public_dns_nameservers }}"
-subnet_prefix: "{{ openstack_subnet_prefix }}"
-master_hostname: "{{ openstack_master_hostname | default('master') }}"
-infra_hostname: "{{ openstack_infra_hostname | default('infra-node') }}"
-node_hostname: "{{ openstack_node_hostname | default('app-node') }}"
-lb_hostname: "{{ openstack_lb_hostname | default('lb') }}"
-etcd_hostname: "{{ openstack_etcd_hostname | default('etcd') }}"
-dns_hostname: "{{ openstack_dns_hostname | default('dns') }}"
-ssh_public_key: "{{ openstack_ssh_public_key }}"
-openstack_image: "{{ openstack_default_image_name }}"
-lb_flavor: "{{ openstack_lb_flavor | default(openstack_default_flavor) }}"
-etcd_flavor: "{{ openstack_etcd_flavor | default(openstack_default_flavor) }}"
-master_flavor: "{{ openstack_master_flavor | default(openstack_default_flavor) }}"
-node_flavor: "{{ openstack_node_flavor | default(openstack_default_flavor) }}"
-infra_flavor: "{{ openstack_infra_flavor | default(openstack_default_flavor) }}"
-dns_flavor: "{{ openstack_dns_flavor | default(openstack_default_flavor) }}"
-openstack_master_image: "{{ openstack_master_image_name | default(openstack_default_image_name) }}"
-openstack_infra_image: "{{ openstack_infra_image_name | default(openstack_default_image_name) }}"
-openstack_node_image: "{{ openstack_node_image_name | default(openstack_default_image_name) }}"
-openstack_lb_image: "{{ openstack_lb_image_name | default(openstack_default_image_name) }}"
-openstack_etcd_image: "{{ openstack_etcd_image_name | default(openstack_default_image_name) }}"
-openstack_dns_image: "{{ openstack_dns_image_name | default(openstack_default_image_name) }}"
-openstack_private_network: >-
-  {% if openstack_provider_network_name | default(None) -%}
-  {{ openstack_provider_network_name }}
-  {%- else -%}
-  {{ openstack_private_network_name | default ('openshift-ansible-' + stack_name + '-net') }}
-  {%- endif -%}
-provider_network: "{{ openstack_provider_network_name | default(None) }}"
-external_network: "{{ openstack_external_network_name | default(None) }}"
-num_etcd: "{{ openstack_num_etcd | default(0) }}"
-num_masters: "{{ openstack_num_masters }}"
-num_nodes: "{{ openstack_num_nodes }}"
-num_infra: "{{ openstack_num_infra }}"
-num_dns: "{{ openstack_num_dns | default(1) }}"
-master_server_group_policies: "{{ openstack_master_server_group_policies | default([]) | to_yaml }}"
-infra_server_group_policies: "{{ openstack_infra_server_group_policies | default([]) | to_yaml }}"
-master_volume_size: "{{ docker_master_volume_size | default(docker_volume_size) }}"
-infra_volume_size: "{{ docker_infra_volume_size | default(docker_volume_size) }}"
-node_volume_size: "{{ docker_node_volume_size | default(docker_volume_size) }}"
-etcd_volume_size: "{{ docker_etcd_volume_size | default('2') }}"
-dns_volume_size: "{{ docker_dns_volume_size | default('1') }}"
-lb_volume_size: "{{ docker_lb_volume_size | default('5') }}"
-nodes_to_remove: "{{ openstack_nodes_to_remove | default([]) |  to_yaml }}"
-use_bastion: "{{ openstack_use_bastion|default(False) }}"
-ui_ssh_tunnel: "{{ openshift_ui_ssh_tunnel|default(False) }}"
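Editor's note: the deleted stack_params.yaml mostly mapped the user-facing `openstack_*` variables onto Heat stack parameters, falling back to `openstack_default_flavor` and `openstack_default_image_name` when no per-role value was given. Assuming the same variable names are still honored after the move to the `openshift_openstack` role, a hypothetical `inventory/group_vars/all.yml` fragment overriding a few of them could look like this (all values are placeholders):

    ---
    # Hypothetical overrides; anything omitted falls back to the defaults
    openstack_num_masters: 3
    openstack_num_infra: 2
    openstack_num_nodes: 5
    openstack_master_flavor: m1.large
    openstack_master_image_name: centos-7-openshift-master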
diff --git a/playbooks/openstack/sample-inventory/inventory.py b/playbooks/openstack/sample-inventory/inventory.py
index 6a1b74b3d..47c56d94d 100755
--- a/playbooks/openstack/sample-inventory/inventory.py
+++ b/playbooks/openstack/sample-inventory/inventory.py
@@ -1,4 +1,11 @@
 #!/usr/bin/env python
+"""
+This is an Ansible dynamic inventory for OpenStack.
+
+It requires your OpenStack credentials to be set in clouds.yaml or your shell
+environment.
+
+"""
 
 from __future__ import print_function
 
@@ -7,7 +14,8 @@ import json
 import shade
 
 
-if __name__ == '__main__':
+def build_inventory():
+    '''Build the dynamic inventory.'''
     cloud = shade.openstack_cloud()
     inventory = {}
 
@@ -39,13 +47,10 @@
     dns = [server.name for server in cluster_hosts
            if server.metadata['host-type'] == 'dns']
 
-    lb = [server.name for server in cluster_hosts
-          if server.metadata['host-type'] == 'lb']
+    load_balancers = [server.name for server in cluster_hosts
+                      if server.metadata['host-type'] == 'lb']
 
-    osev3 = list(set(nodes + etcd + lb))
-
-    groups = [server.metadata.group for server in cluster_hosts
-              if 'group' in server.metadata]
+    osev3 = list(set(nodes + etcd + load_balancers))
 
     inventory['cluster_hosts'] = {'hosts': [s.name for s in cluster_hosts]}
     inventory['OSEv3'] = {'hosts': osev3}
@@ -55,7 +60,7 @@
     inventory['infra_hosts'] = {'hosts': infra_hosts}
     inventory['app'] = {'hosts': app}
     inventory['dns'] = {'hosts': dns}
-    inventory['lb'] = {'hosts': lb}
+    inventory['lb'] = {'hosts': load_balancers}
 
     for server in cluster_hosts:
         if 'group' in server.metadata:
@@ -68,21 +73,24 @@
     for server in cluster_hosts:
         ssh_ip_address = server.public_v4 or server.private_v4
-        vars = {
+        hostvars = {
            'ansible_host': ssh_ip_address
        }
 
         public_v4 = server.public_v4 or server.private_v4
         if public_v4:
-            vars['public_v4'] = public_v4
+            hostvars['public_v4'] = public_v4
 
         # TODO(shadower): what about multiple networks?
         if server.private_v4:
-            vars['private_v4'] = server.private_v4
+            hostvars['private_v4'] = server.private_v4
 
         node_labels = server.metadata.get('node_labels')
         if node_labels:
-            vars['openshift_node_labels'] = node_labels
+            hostvars['openshift_node_labels'] = node_labels
+
+        inventory['_meta']['hostvars'][server.name] = hostvars
+    return inventory
 
-        inventory['_meta']['hostvars'][server.name] = vars
 
-    print(json.dumps(inventory, indent=4, sort_keys=True))
+if __name__ == '__main__':
+    print(json.dumps(build_inventory(), indent=4, sort_keys=True))
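Editor's note: as the new docstring says, the inventory relies on shade picking up OpenStack credentials from clouds.yaml or from the shell environment, and it groups the returned servers by their `host-type` metadata (plus an optional `group` key). A hypothetical clouds.yaml that would satisfy it — every value below is a placeholder:

    # ~/.config/openstack/clouds.yaml (hypothetical values)
    clouds:
      openshift:
        auth:
          auth_url: https://openstack.example.com:5000/v3
          username: openshift
          password: secret
          project_name: openshift
          user_domain_name: Default
          project_domain_name: Default
        region_name: RegionOne

With more than one cloud defined, the entry to use can be selected through the OS_CLOUD environment variable.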
