Diffstat (limited to 'playbooks')
-rw-r--r--   playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml | 11
-rw-r--r--   playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py | 41
-rw-r--r--   playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml | 206
-rw-r--r--   playbooks/adhoc/upgrades/README.md | 21
l---------   playbooks/adhoc/upgrades/filter_plugins | 1
l---------   playbooks/adhoc/upgrades/lookup_plugins | 1
l---------   playbooks/adhoc/upgrades/roles | 1
-rw-r--r--   playbooks/adhoc/upgrades/upgrade.yml | 115
-rw-r--r--   playbooks/aws/openshift-cluster/tasks/launch_instances.yml | 1
-rw-r--r--   playbooks/libvirt/openshift-cluster/templates/network.xml | 2
10 files changed, 389 insertions, 11 deletions
diff --git a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
index c9ae923bb..b6a2d2f26 100644
--- a/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
+++ b/playbooks/adhoc/docker_loopback_to_lvm/docker_loopback_to_direct_lvm.yml
@@ -27,9 +27,8 @@
   gather_facts: no
   vars:
-    cli_volume_type: io1
+    cli_volume_type: gp2
     cli_volume_size: 30
-    cli_volume_iops: "{{ 30 * cli_volume_size }}"
   pre_tasks:
   - fail:
@@ -104,7 +103,6 @@
       volume_size: "{{ cli_volume_size | default(30, True)}}"
       volume_type: "{{ cli_volume_type }}"
       device_name: /dev/xvdb
-      iops: "{{ 30 * cli_volume_size }}"
     register: vol
   - debug: var=vol
@@ -142,10 +140,3 @@
   - debug: var=dockerstart
-  - name: Wait for docker to stabilize
-    pause:
-      seconds: 30
-
-  # leaving off the '-t' for docker exec. With it, it doesn't work with ansible and tty support
-  - name: update zabbix docker items
-    command: docker exec -i oso-rhel7-zagg-client /usr/local/bin/cron-send-docker-metrics.py
diff --git a/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py b/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py
new file mode 100644
index 000000000..d0264cde9
--- /dev/null
+++ b/playbooks/adhoc/grow_docker_vg/filter_plugins/oo_filters.py
@@ -0,0 +1,41 @@
+#!/usr/bin/python
+# -*- coding: utf-8 -*-
+# vim: expandtab:tabstop=4:shiftwidth=4
+'''
+Custom filters for use in openshift-ansible
+'''
+
+import pdb
+
+
+class FilterModule(object):
+    ''' Custom ansible filters '''
+
+    @staticmethod
+    def oo_pdb(arg):
+        ''' This pops you into a pdb instance where arg is the data passed in
+            from the filter.
+            Ex: "{{ hostvars | oo_pdb }}"
+        '''
+        pdb.set_trace()
+        return arg
+
+    @staticmethod
+    def translate_volume_name(volumes, target_volume):
+        '''
+            This filter matches a device string /dev/sdX to /dev/xvdX
+            It will then return the AWS volume ID
+        '''
+        for vol in volumes:
+            translated_name = vol["attachment_set"]["device"].replace("/dev/sd", "/dev/xvd")
+            if target_volume.startswith(translated_name):
+                return vol["id"]
+
+        return None
+
+
+    def filters(self):
+        ''' returns a mapping of filters to methods '''
+        return {
+            "translate_volume_name": self.translate_volume_name,
+        }
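
For clarity, here is a small self-contained sketch of what the translate_volume_name filter does. The volume list below is hypothetical; in the playbook the real list comes from the ec2_vol module's "state: list" result and the target device comes from pvs:

# A hypothetical ec2_vol 'state: list' result; AWS reports devices as /dev/sdX
# while the instance kernel exposes them as /dev/xvdX.
volumes = [
    {"id": "vol-0aaa", "attachment_set": {"device": "/dev/sda1"}},
    {"id": "vol-0bbb", "attachment_set": {"device": "/dev/sdb"}},
]

def translate_volume_name(volumes, target_volume):
    # Same matching logic as the filter above.
    for vol in volumes:
        translated_name = vol["attachment_set"]["device"].replace("/dev/sd", "/dev/xvd")
        if target_volume.startswith(translated_name):
            return vol["id"]
    return None

# The docker PV found via pvs is typically a partition such as /dev/xvdb1;
# startswith() lets the partition match its parent device.
print(translate_volume_name(volumes, "/dev/xvdb1"))  # -> vol-0bbb

In the playbook below this is invoked as "{{ attached_volumes.volumes | translate_volume_name(docker_pv_name.stdout) }}".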
diff --git a/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
new file mode 100644
index 000000000..ef9b45abd
--- /dev/null
+++ b/playbooks/adhoc/grow_docker_vg/grow_docker_vg.yml
@@ -0,0 +1,206 @@
+---
+# This playbook grows the docker VG on a node by:
+# * add a new volume
+# * add volume to the existing VG.
+# * pv move to the new volume.
+# * remove old volume
+# * detach volume
+# * mark old volume in AWS with "REMOVE ME" tag
+# * grow docker LVM to 90% of the VG
+#
+# To run:
+# 1. Source your AWS credentials (make sure it's the corresponding AWS account) into your environment
+#    export AWS_ACCESS_KEY_ID='XXXXX'
+#    export AWS_SECRET_ACCESS_KEY='XXXXXX'
+#
+# 2. run the playbook:
+# ansible-playbook -e 'cli_tag_name=<tag-name>' grow_docker_vg.yml
+#
+# Example:
+# ansible-playbook -e 'cli_tag_name=ops-compute-12345' grow_docker_vg.yml
+#
+# Notes:
+# * By default this will create a 55GB GP2 volume. This can be overridden with the "-e 'cli_volume_size=100'" variable
+# * This uses GP2 by default. Support for Provisioned IOPS has not been added
+# * This will assign the new volume to /dev/xvdc. This is not yet configurable via a variable
+# * This can be done with NO downtime on the host
+# * This playbook assumes that a Logical Volume named "docker-pool" already exists. This is
+#   the LV that is created by the "docker-storage-setup" command
+#
+
+- name: Grow the docker volume group
+  hosts: "tag_Name_{{ cli_tag_name }}"
+  user: root
+  connection: ssh
+  gather_facts: no
+
+  vars:
+    cli_volume_type: gp2
+    cli_volume_size: 55
+#    cli_volume_iops: "{{ 30 * cli_volume_size }}"
+
+  pre_tasks:
+  - fail:
+      msg: "This playbook requires {{item}} to be set."
+    when: "{{ item }} is not defined or {{ item }} == ''"
+    with_items:
+    - cli_tag_name
+    - cli_volume_size
+
+  - debug:
+      var: hosts
+
+  - name: start docker
+    service:
+      name: docker
+      state: started
+
+  - name: Determine if Storage Driver (docker info) is devicemapper
+    shell: docker info | grep 'Storage Driver:.*devicemapper'
+    register: device_mapper_check
+    ignore_errors: yes
+
+  - debug:
+      var: device_mapper_check
+
+  - name: fail if we don't detect devicemapper
+    fail:
+      msg: The "Storage Driver" in "docker info" is not set to "devicemapper"! Please investigate manually.
+    when: device_mapper_check.rc == 1
+
+  # docker-storage-setup creates a docker-pool as the lvm. I am using docker-pool lvm to test
+  # and find the volume group.
+  - name: Attempt to find the Volume Group that docker is using
+    shell: lvs | grep docker-pool | awk '{print $2}'
+    register: docker_vg_name
+    ignore_errors: yes
+
+  - debug:
+      var: docker_vg_name
+
+  - name: fail if we don't find a docker volume group
+    fail:
+      msg: Unable to find docker volume group. Please investigate manually.
+    when: docker_vg_name.stdout_lines|length != 1
+
+  # docker-storage-setup creates a docker-pool as the lvm. I am using docker-pool lvm to test
+  # and find the physical volume.
+  - name: Attempt to find the Physical Volume that docker is using
+ shell: "pvs | grep {{ docker_vg_name.stdout }} | awk '{print $1}'"
+ register: docker_pv_name
+ ignore_errors: yes
+
+ - debug:
+ var: docker_pv_name
+
+ - name: fail if we don't find a docker physical volume
+ fail:
+ msg: Unable to find docker physical volume. Please investigate manually.
+ when: docker_pv_name.stdout_lines|length != 1
+
+
+ - name: get list of volumes from AWS
+ delegate_to: localhost
+ ec2_vol:
+ state: list
+ instance: "{{ ec2_id }}"
+ region: "{{ ec2_region }}"
+ register: attached_volumes
+
+ - debug: var=attached_volumes
+
+ - name: get volume id of current docker volume
+ set_fact:
+ old_docker_volume_id: "{{ attached_volumes.volumes | translate_volume_name(docker_pv_name.stdout) }}"
+
+ - debug: var=old_docker_volume_id
+
+ - name: check to see if /dev/xvdc exists
+ command: test -e /dev/xvdc
+ register: xvdc_check
+ ignore_errors: yes
+
+ - debug: var=xvdc_check
+
+ - name: fail if /dev/xvdc already exists
+ fail:
+ msg: /dev/xvdc already exists. Please investigate
+ when: xvdc_check.rc == 0
+
+ - name: Create a volume and attach it
+ delegate_to: localhost
+ ec2_vol:
+ state: present
+ instance: "{{ ec2_id }}"
+ region: "{{ ec2_region }}"
+ volume_size: "{{ cli_volume_size | default(30, True)}}"
+ volume_type: "{{ cli_volume_type }}"
+ device_name: /dev/xvdc
+ register: create_volume
+
+ - debug: var=create_volume
+
+ - name: Fail when problems creating volumes and attaching
+ fail:
+ msg: "Failed to create or attach volume msg: {{ create_volume.msg }}"
+ when: create_volume.msg is defined
+
+ - name: tag the vol with a name
+ delegate_to: localhost
+ ec2_tag: region={{ ec2_region }} resource={{ create_volume.volume_id }}
+ args:
+ tags:
+ Name: "{{ ec2_tag_Name }}"
+ env: "{{ ec2_tag_environment }}"
+ register: voltags
+
+ - name: check for attached drive
+ command: test -b /dev/xvdc
+ register: attachment_check
+ until: attachment_check.rc == 0
+ retries: 30
+ delay: 2
+
+ - name: partition the new drive and make it lvm
+ command: parted /dev/xvdc --script -- mklabel msdos mkpart primary 0% 100% set 1 lvm
+
+  - name: pvcreate /dev/xvdc1
+    command: pvcreate /dev/xvdc1
+
+  - name: Extend the docker volume group
+    command: vgextend "{{ docker_vg_name.stdout }}" /dev/xvdc1
+
+  - name: pvmove onto new volume
+    command: "pvmove {{ docker_pv_name.stdout }} /dev/xvdc1"
+    async: 3600
+    poll: 10
+
+  - name: Remove the old docker drive from the volume group
+    command: "vgreduce {{ docker_vg_name.stdout }} {{ docker_pv_name.stdout }}"
+
+  - name: Remove the pv from the old drive
+    command: "pvremove {{ docker_pv_name.stdout }}"
+
+  - name: Extend the docker lvm
+    command: "lvextend -l '90%VG' /dev/{{ docker_vg_name.stdout }}/docker-pool"
+
+  - name: detach old docker volume
+    delegate_to: localhost
+    ec2_vol:
+      region: "{{ ec2_region }}"
+      id: "{{ old_docker_volume_id }}"
+      instance: None
+
+  - name: tag the old volume with a REMOVE ME label
+    delegate_to: localhost
+    ec2_tag: region={{ ec2_region }} resource={{old_docker_volume_id}}
+    args:
+      tags:
+        Name: "{{ ec2_tag_Name }} REMOVE ME"
+    register: voltags
+
+  - name: Update the /etc/sysconfig/docker-storage-setup with new device
+    lineinfile:
+      dest: /etc/sysconfig/docker-storage-setup
+      regexp: ^DEVS=
+      line: DEVS=/dev/xvdc
diff --git a/playbooks/adhoc/upgrades/README.md b/playbooks/adhoc/upgrades/README.md
new file mode 100644
index 000000000..6de8a970f
--- /dev/null
+++ b/playbooks/adhoc/upgrades/README.md
@@ -0,0 +1,21 @@
+# [NOTE]
+This playbook will re-run installation steps, overwriting any local
+modifications. Make sure your inventory has been updated with any
+modifications you have made since your initial installation. If you find any items
+that cannot be configured via ansible, please open an issue at
+https://github.com/openshift/openshift-ansible
+
+# Overview
+This playbook is available as a technical preview. It currently performs the
+following steps.
+
+ * Upgrades and restarts master services
+ * Upgrades and restarts node services
+ * Applies the latest configuration by re-running the installation playbook
+ * Applies the latest cluster policies
+ * Updates the default router if one exists
+ * Updates the default registry if one exists
+ * Updates image streams and quickstarts
+
+# Usage
+ansible-playbook -i ~/ansible-inventory openshift-ansible/playbooks/adhoc/upgrades/upgrade.yml
diff --git a/playbooks/adhoc/upgrades/filter_plugins b/playbooks/adhoc/upgrades/filter_plugins
new file mode 120000
index 000000000..b0b7a3414
--- /dev/null
+++ b/playbooks/adhoc/upgrades/filter_plugins
@@ -0,0 +1 @@
+../../../filter_plugins/
\ No newline at end of file
diff --git a/playbooks/adhoc/upgrades/lookup_plugins b/playbooks/adhoc/upgrades/lookup_plugins
new file mode 120000
index 000000000..73cafffe5
--- /dev/null
+++ b/playbooks/adhoc/upgrades/lookup_plugins
@@ -0,0 +1 @@
+../../../lookup_plugins/
\ No newline at end of file
diff --git a/playbooks/adhoc/upgrades/roles b/playbooks/adhoc/upgrades/roles
new file mode 120000
index 000000000..e2b799b9d
--- /dev/null
+++ b/playbooks/adhoc/upgrades/roles
@@ -0,0 +1 @@
+../../../roles/
\ No newline at end of file
diff --git a/playbooks/adhoc/upgrades/upgrade.yml b/playbooks/adhoc/upgrades/upgrade.yml
new file mode 100644
index 000000000..e666f0472
--- /dev/null
+++ b/playbooks/adhoc/upgrades/upgrade.yml
@@ -0,0 +1,115 @@
+---
+- name: Re-Run cluster configuration to apply latest configuration changes
+  include: ../../common/openshift-cluster/config.yml
+  vars:
+    g_etcd_group: "{{ 'etcd' }}"
+    g_masters_group: "{{ 'masters' }}"
+    g_nodes_group: "{{ 'nodes' }}"
+    openshift_cluster_id: "{{ cluster_id | default('default') }}"
+    openshift_deployment_type: "{{ deployment_type }}"
+
+- name: Upgrade masters
+  hosts: masters
+  vars:
+    openshift_version: "{{ openshift_pkg_version | default('') }}"
+  tasks:
+    - name: Upgrade master packages
+      yum: pkg={{ openshift.common.service_type }}-master{{ openshift_version }} state=latest
+    - name: Restart master services
+      service: name="{{ openshift.common.service_type}}-master" state=restarted
+
+- name: Upgrade nodes
+  hosts: nodes
+  vars:
+    openshift_version: "{{ openshift_pkg_version | default('') }}"
+  tasks:
+    - name: Upgrade node packages
+      yum: pkg={{ openshift.common.service_type }}-node{{ openshift_version }} state=latest
+    - name: Restart node services
+      service: name="{{ openshift.common.service_type }}-node" state=restarted
+
+- name: Determine new master version
+  hosts: oo_first_master
+  tasks:
+    - name: Determine new version
+      command: >
+        rpm -q --queryformat '%{version}' {{ openshift.common.service_type }}-master
+      register: _new_version
+
+- name: Ensure AOS 3.0.2 or Origin 1.0.6
+  hosts: oo_first_master
+  tasks:
+    - fail: msg="This playbook requires Origin 1.0.6 or Atomic OpenShift 3.0.2 or later"
+      when: _new_version.stdout | version_compare('1.0.6','<') or ( _new_version.stdout | version_compare('3.0','>=') and _new_version.stdout | version_compare('3.0.2','<') )
+
+- name: Update cluster policy
+  hosts: oo_first_master
+  tasks:
+    - name: oadm policy reconcile-cluster-roles --confirm
+      command: >
+        {{ openshift.common.admin_binary}} --config={{ openshift.common.config_base }}/master/admin.kubeconfig
+        policy reconcile-cluster-roles --confirm
+
+- name: Upgrade default router
+  hosts: oo_first_master
+  vars:
+    - router_image: "{{ openshift.master.registry_url | replace( '${component}', 'haproxy-router' ) | replace ( '${version}', 'v' + _new_version.stdout ) }}"
+    - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
+  tasks:
+    - name: Check for default router
+      command: >
+        {{ oc_cmd }} get -n default dc/router
+      register: _default_router
+      failed_when: false
+      changed_when: false
+    - name: Check for allowHostNetwork and allowHostPorts
+      when: _default_router.rc == 0
+      shell: >
+        {{ oc_cmd }} get -o yaml scc/privileged | /usr/bin/grep -e allowHostPorts -e allowHostNetwork
+      register: _scc
+    - name: Grant allowHostNetwork and allowHostPorts
+      when:
+        - _default_router.rc == 0
+        - "'false' in _scc.stdout"
+      command: >
+        {{ oc_cmd }} patch scc/privileged -p '{"allowHostPorts":true,"allowHostNetwork":true}' --loglevel=9
+    - name: Update deployment config to 1.0.4/3.0.1 spec
+      when: _default_router.rc == 0
+      command: >
+        {{ oc_cmd }} patch dc/router -p
+        '{"spec":{"strategy":{"rollingParams":{"updatePercent":-10},"spec":{"serviceAccount":"router","serviceAccountName":"router"}}}}'
+    - name: Switch to hostNetwork=true
+      when: _default_router.rc == 0
+      command: >
+        {{ oc_cmd }} patch dc/router -p '{"spec":{"template":{"spec":{"hostNetwork":true}}}}'
+    - name: Update router image to current version
+      when: _default_router.rc == 0
+      command: >
+        {{ oc_cmd }} patch dc/router -p
+        '{"spec":{"template":{"spec":{"containers":[{"name":"router","image":"{{ router_image }}"}]}}}}'
+
+- name: Upgrade default registry
+  hosts: oo_first_master
+  vars:
+    - registry_image: "{{ openshift.master.registry_url | replace( '${component}', 'docker-registry' ) | replace ( '${version}', 'v' + _new_version.stdout ) }}"
+    - oc_cmd: "{{ openshift.common.client_binary }} --config={{ openshift.common.config_base }}/master/admin.kubeconfig"
+  tasks:
+    - name: Check for default registry
+      command: >
+        {{ oc_cmd }} get -n default dc/docker-registry
+      register: _default_registry
+      failed_when: false
+      changed_when: false
+    - name: Update registry image to current version
+      when: _default_registry.rc == 0
+      command: >
+        {{ oc_cmd }} patch dc/docker-registry -p
+        '{"spec":{"template":{"spec":{"containers":[{"name":"registry","image":"{{ registry_image }}"}]}}}}'
+
+- name: Update image streams and templates
+  hosts: oo_first_master
+  vars:
+    openshift_examples_import_command: "update"
+    openshift_deployment_type: "{{ deployment_type }}"
+  roles:
+    - openshift_examples
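
The router_image and registry_image vars above are built by substituting the component and version into the master's registry_url template. A quick sketch of that substitution, using a hypothetical registry_url value (the real one comes from the openshift.master.registry_url fact):

# Hypothetical registry_url; the real value comes from openshift.master.registry_url.
registry_url = "registry.example.com/openshift3/ose-${component}:${version}"
new_version = "3.0.2"  # stands in for _new_version.stdout

router_image = registry_url.replace("${component}", "haproxy-router").replace("${version}", "v" + new_version)
registry_image = registry_url.replace("${component}", "docker-registry").replace("${version}", "v" + new_version)

print(router_image)    # registry.example.com/openshift3/ose-haproxy-router:v3.0.2
print(registry_image)  # registry.example.com/openshift3/ose-docker-registry:v3.0.2

The oc patch tasks then roll these images out to the existing dc/router and dc/docker-registry deployment configs.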
diff --git a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
index b77bcdc1a..9c699120b 100644
--- a/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
+++ b/playbooks/aws/openshift-cluster/tasks/launch_instances.yml
@@ -172,6 +172,7 @@
       - rotate 7
       - compress
       - sharedscripts
+      - missingok
     scripts:
       postrotate: "/bin/kill -HUP `cat /var/run/syslogd.pid 2> /dev/null` 2> /dev/null || true"
diff --git a/playbooks/libvirt/openshift-cluster/templates/network.xml b/playbooks/libvirt/openshift-cluster/templates/network.xml
index 86dcd62bb..050bc7ab9 100644
--- a/playbooks/libvirt/openshift-cluster/templates/network.xml
+++ b/playbooks/libvirt/openshift-cluster/templates/network.xml
@@ -8,7 +8,7 @@
<!-- TODO: query for first available virbr interface available -->
<bridge name='virbr3' stp='on' delay='0'/>
<!-- TODO: make overridable -->
- <domain name='example.com'/>
+ <domain name='example.com' localOnly='yes' />
<dns>
<!-- TODO: automatically add host entries -->
</dns>