Diffstat (limited to 'bin')
-rw-r--r--  bin/README_SHELL_COMPLETION          37
-rwxr-xr-x  bin/cluster                         237
-rwxr-xr-x  bin/ohi                             114
-rw-r--r--  bin/openshift-ansible-bin.spec      104
-rw-r--r--  bin/openshift_ansible.conf.example    6
-rw-r--r--  bin/openshift_ansible/__init__.py     0
-rw-r--r--  bin/openshift_ansible/awsutil.py    208
l---------  bin/openshift_ansible/multi_ec2.py    1
-rw-r--r--  bin/openshift_ansible/utils.py       30
-rwxr-xr-x  bin/opscp                           131
-rwxr-xr-x  bin/opssh                           134
-rwxr-xr-x  bin/oscp                            238
-rwxr-xr-x  bin/ossh                            221
-rwxr-xr-x  bin/ossh_bash_completion             39
-rw-r--r--  bin/ossh_zsh_completion              24
-rw-r--r--  bin/zsh_functions/_ossh              49
16 files changed, 169 insertions, 1404 deletions
diff --git a/bin/README_SHELL_COMPLETION b/bin/README_SHELL_COMPLETION
deleted file mode 100644
index 5f05df7fc..000000000
--- a/bin/README_SHELL_COMPLETION
+++ /dev/null
@@ -1,37 +0,0 @@
-# completion is available for ossh/oscp
-
-ossh/oscp uses a dynamic inventory cache in order to lookup
-hostnames and translate them to something meaningful
-such as an IP address or dns name.
-
-This allows us to treat our servers as cattle and not as pets.
-
-If you have not run the ossh command and it has not laid down
-a cache file the completions will not be available.
-
-You can populate the cache by running `ossh --list`. This
-will populate the cache file and the completions should
-become available.
-
-This script will look at the cached version of your
-multi_ec2 results in ~/.ansible/tmp/multi_ec2_inventory.cache.
-It will then parse a few {host}.{env} out of the json
-and return them to be completable.
-
-# BASH
-In order to setup bash completion, source the following script:
-/path/to/repository/openshift-ansible/bin/ossh_bash_completion
-
-# ZSH
-In order to setup zsh completion, you will need to verify
-that the _ossh_zsh_completion script is somewhere in the path
-of $fpath.
-
-Once $fpath includes the _ossh_zsh_completion script then you should
-run `exec zsh`. This will then allow you to call `ossh host[TAB]`
-for a list of completions.
-
-Before completing the final step, zsh keeps its own cache in
-~/.zcompdump of the known functions and variables. In order to
-refresh with new variables and completion arrays you might need
-to `rm ~/.zcompdump` before running `exec zsh`.
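
As an aside on the removed README above: the completion it describes works by parsing `{host}.{env}` names out of the multi_ec2 inventory cache. Below is a minimal Python sketch of that lookup, assuming the cache path and the `ec2_tag_Name`/`ec2_tag_environment` keys used by the completion scripts later in this diff; everything else in the sketch is illustrative.

#!/usr/bin/env python
# Sketch: list "{host}.{env}" completion candidates from the cached
# multi_ec2 inventory, as the removed README describes.
import json
import os

CACHE = os.path.expanduser('~/.ansible/tmp/multi_ec2_inventory.cache')

def completion_candidates(cache_path=CACHE):
    if not os.path.isfile(cache_path):
        return []  # cache not populated yet; run `ossh --list` first
    with open(cache_path) as cache_file:
        inventory = json.load(cache_file)
    hostvars = inventory.get('_meta', {}).get('hostvars', {})
    names = set()
    for host in hostvars.values():
        if 'ec2_tag_Name' in host and 'ec2_tag_environment' in host:
            names.add('%s.%s' % (host['ec2_tag_Name'], host['ec2_tag_environment']))
    return sorted(names)

if __name__ == '__main__':
    print('\n'.join(completion_candidates()))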
diff --git a/bin/cluster b/bin/cluster
index 746c0349a..b9b2ab15f 100755
--- a/bin/cluster
+++ b/bin/cluster
@@ -5,6 +5,7 @@ import argparse
import ConfigParser
import os
import sys
+import subprocess
import traceback
@@ -23,6 +24,18 @@ class Cluster(object):
'-o ControlMaster=auto '
'-o ControlPersist=600s '
)
+ # Because of `UserKnownHostsFile=/dev/null`
+ # our `.ssh/known_hosts` file most probably misses the ssh host public keys
+ # of our servers.
+ # In that case, ansible serializes the execution of ansible modules
+ # because we might be interactively prompted to accept the ssh host public keys.
+ # Because of `StrictHostKeyChecking=no` we know that we won't be prompted
+ # So, we don't want our modules execution to be serialized.
+ os.environ['ANSIBLE_HOST_KEY_CHECKING'] = 'False'
+ # TODO: A more secure way to proceed would consist in dynamically
+ # retrieving the ssh host public keys from the IaaS interface
+ if 'ANSIBLE_SSH_PIPELINING' not in os.environ:
+ os.environ['ANSIBLE_SSH_PIPELINING'] = 'True'
def get_deployment_type(self, args):
"""
@@ -38,89 +51,130 @@ class Cluster(object):
deployment_type = os.environ['OS_DEPLOYMENT_TYPE']
return deployment_type
+
def create(self, args):
"""
Create an OpenShift cluster for given provider
:param args: command line arguments provided by user
- :return: exit status from run command
"""
- env = {'cluster_id': args.cluster_id,
+ cluster = {'cluster_id': args.cluster_id,
'deployment_type': self.get_deployment_type(args)}
- playbook = "playbooks/{}/openshift-cluster/launch.yml".format(args.provider)
+ playbook = "playbooks/{0}/openshift-cluster/launch.yml".format(args.provider)
+ inventory = self.setup_provider(args.provider)
+
+ cluster['num_masters'] = args.masters
+ cluster['num_nodes'] = args.nodes
+ cluster['num_infra'] = args.infra
+ cluster['num_etcd'] = args.etcd
+ cluster['cluster_env'] = args.env
+
+ if args.cloudprovider and args.provider == 'openstack':
+ cluster['openshift_cloudprovider_kind'] = 'openstack'
+ cluster['openshift_cloudprovider_openstack_auth_url'] = os.getenv('OS_AUTH_URL')
+ cluster['openshift_cloudprovider_openstack_username'] = os.getenv('OS_USERNAME')
+ cluster['openshift_cloudprovider_openstack_password'] = os.getenv('OS_PASSWORD')
+ if 'OS_USER_DOMAIN_ID' in os.environ:
+ cluster['openshift_cloudprovider_openstack_domain_id'] = os.getenv('OS_USER_DOMAIN_ID')
+ if 'OS_USER_DOMAIN_NAME' in os.environ:
+ cluster['openshift_cloudprovider_openstack_domain_name'] = os.getenv('OS_USER_DOMAIN_NAME')
+ if 'OS_PROJECT_ID' in os.environ or 'OS_TENANT_ID' in os.environ:
+ cluster['openshift_cloudprovider_openstack_tenant_id'] = os.getenv('OS_PROJECT_ID', os.getenv('OS_TENANT_ID'))
+ if 'OS_PROJECT_NAME' in os.environ or 'OS_TENANT_NAME' in os.environ:
+ cluster['openshift_cloudprovider_openstack_tenant_name'] = os.getenv('OS_PROJECT_NAME', os.getenv('OS_TENANT_NAME'))
+ if 'OS_REGION_NAME' in os.environ:
+ cluster['openshift_cloudprovider_openstack_region'] = os.getenv('OS_REGION_NAME')
+
+ self.action(args, inventory, cluster, playbook)
+
+ def add_nodes(self, args):
+ """
+ Add nodes to an existing cluster for given provider
+ :param args: command line arguments provided by user
+ """
+ cluster = {'cluster_id': args.cluster_id,
+ 'deployment_type': self.get_deployment_type(args),
+ }
+ playbook = "playbooks/{0}/openshift-cluster/add_nodes.yml".format(args.provider)
inventory = self.setup_provider(args.provider)
- env['num_masters'] = args.masters
- env['num_nodes'] = args.nodes
- env['num_etcd'] = args.etcd
+ cluster['num_nodes'] = args.nodes
+ cluster['num_infra'] = args.infra
+ cluster['cluster_env'] = args.env
- return self.action(args, inventory, env, playbook)
+ self.action(args, inventory, cluster, playbook)
def terminate(self, args):
"""
Destroy OpenShift cluster
:param args: command line arguments provided by user
- :return: exit status from run command
"""
- env = {'cluster_id': args.cluster_id,
- 'deployment_type': self.get_deployment_type(args)}
- playbook = "playbooks/{}/openshift-cluster/terminate.yml".format(args.provider)
+ cluster = {'cluster_id': args.cluster_id,
+ 'deployment_type': self.get_deployment_type(args),
+ 'cluster_env': args.env,
+ }
+ playbook = "playbooks/{0}/openshift-cluster/terminate.yml".format(args.provider)
inventory = self.setup_provider(args.provider)
- return self.action(args, inventory, env, playbook)
+ self.action(args, inventory, cluster, playbook)
def list(self, args):
"""
List VMs in cluster
:param args: command line arguments provided by user
- :return: exit status from run command
"""
- env = {'cluster_id': args.cluster_id,
- 'deployment_type': self.get_deployment_type(args)}
- playbook = "playbooks/{}/openshift-cluster/list.yml".format(args.provider)
+ cluster = {'cluster_id': args.cluster_id,
+ 'deployment_type': self.get_deployment_type(args),
+ 'cluster_env': args.env,
+ }
+ playbook = "playbooks/{0}/openshift-cluster/list.yml".format(args.provider)
inventory = self.setup_provider(args.provider)
- return self.action(args, inventory, env, playbook)
+ self.action(args, inventory, cluster, playbook)
def config(self, args):
"""
Configure or reconfigure OpenShift across clustered VMs
:param args: command line arguments provided by user
- :return: exit status from run command
"""
- env = {'cluster_id': args.cluster_id,
- 'deployment_type': self.get_deployment_type(args)}
- playbook = "playbooks/{}/openshift-cluster/config.yml".format(args.provider)
+ cluster = {'cluster_id': args.cluster_id,
+ 'deployment_type': self.get_deployment_type(args),
+ 'cluster_env': args.env,
+ }
+ playbook = "playbooks/{0}/openshift-cluster/config.yml".format(args.provider)
inventory = self.setup_provider(args.provider)
- return self.action(args, inventory, env, playbook)
+ self.action(args, inventory, cluster, playbook)
def update(self, args):
"""
Update to latest OpenShift across clustered VMs
:param args: command line arguments provided by user
- :return: exit status from run command
"""
- env = {'cluster_id': args.cluster_id,
- 'deployment_type': self.get_deployment_type(args)}
- playbook = "playbooks/{}/openshift-cluster/update.yml".format(args.provider)
+ cluster = {'cluster_id': args.cluster_id,
+ 'deployment_type': self.get_deployment_type(args),
+ 'cluster_env': args.env,
+ }
+
+ playbook = "playbooks/{0}/openshift-cluster/update.yml".format(args.provider)
inventory = self.setup_provider(args.provider)
- return self.action(args, inventory, env, playbook)
+ self.action(args, inventory, cluster, playbook)
def service(self, args):
"""
Make the same service call across all nodes in the cluster
:param args: command line arguments provided by user
- :return: exit status from run command
"""
- env = {'cluster_id': args.cluster_id,
- 'deployment_type': self.get_deployment_type(args),
- 'new_cluster_state': args.state}
+ cluster = {'cluster_id': args.cluster_id,
+ 'deployment_type': self.get_deployment_type(args),
+ 'new_cluster_state': args.state,
+ 'cluster_env': args.env,
+ }
- playbook = "playbooks/{}/openshift-cluster/service.yml".format(args.provider)
+ playbook = "playbooks/{0}/openshift-cluster/service.yml".format(args.provider)
inventory = self.setup_provider(args.provider)
- return self.action(args, inventory, env, playbook)
+ self.action(args, inventory, cluster, playbook)
def setup_provider(self, provider):
"""
@@ -130,10 +184,13 @@ class Cluster(object):
"""
config = ConfigParser.ConfigParser()
if 'gce' == provider:
- config.readfp(open('inventory/gce/hosts/gce.ini'))
+ gce_ini_default_path = os.path.join('inventory/gce/hosts/gce.ini')
+ gce_ini_path = os.environ.get('GCE_INI_PATH', gce_ini_default_path)
+ if os.path.exists(gce_ini_path):
+ config.readfp(open(gce_ini_path))
- for key in config.options('gce'):
- os.environ[key] = config.get('gce', key)
+ for key in config.options('gce'):
+ os.environ[key] = config.get('gce', key)
inventory = '-i inventory/gce/hosts'
elif 'aws' == provider:
@@ -149,10 +206,10 @@ class Cluster(object):
boto_conf_files = ['~/.aws/credentials', '~/.boto']
conf_exists = lambda conf: os.path.isfile(os.path.expanduser(conf))
- boto_configs = [ conf for conf in boto_conf_files if conf_exists(conf)]
+ boto_configs = [conf for conf in boto_conf_files if conf_exists(conf)]
if len(key_missing) > 0 and len(boto_configs) == 0:
- raise ValueError("PROVIDER aws requires {} environment variable(s). See README_AWS.md".format(missing))
+ raise ValueError("PROVIDER aws requires {0} environment variable(s). See README_AWS.md".format(key_missing))
elif 'libvirt' == provider:
inventory = '-i inventory/libvirt/hosts'
@@ -160,53 +217,63 @@ class Cluster(object):
inventory = '-i inventory/openstack/hosts'
else:
# this code should never be reached
- raise ValueError("invalid PROVIDER {}".format(provider))
+ raise ValueError("invalid PROVIDER {0}".format(provider))
return inventory
- def action(self, args, inventory, env, playbook):
+ def action(self, args, inventory, cluster, playbook):
"""
Build ansible-playbook command line and execute
:param args: command line arguments provided by user
:param inventory: derived provider library
- :param env: environment variables for kubernetes
+ :param cluster: cluster variables for kubernetes
:param playbook: ansible playbook to execute
- :return: exit status from ansible-playbook command
"""
verbose = ''
if args.verbose > 0:
- verbose = '-{}'.format('v' * args.verbose)
+ verbose = '-{0}'.format('v' * args.verbose)
if args.option:
for opt in args.option:
k, v = opt.split('=', 1)
- env['cli_' + k] = v
+ cluster['cli_' + k] = v
- ansible_env = '-e \'{}\''.format(
- ' '.join(['%s=%s' % (key, value) for (key, value) in env.items()])
+ ansible_extra_vars = '-e \'{0}\''.format(
+ ' '.join(['%s=%s' % (key, value) for (key, value) in cluster.items()])
)
- command = 'ansible-playbook {} {} {} {}'.format(
- verbose, inventory, ansible_env, playbook
+ command = 'ansible-playbook {0} {1} {2} {3}'.format(
+ verbose, inventory, ansible_extra_vars, playbook
)
if args.profile:
command = 'ANSIBLE_CALLBACK_PLUGINS=ansible-profile/callback_plugins ' + command
if args.verbose > 1:
- command = 'time {}'.format(command)
+ command = 'time {0}'.format(command)
if args.verbose > 0:
- sys.stderr.write('RUN [{}]\n'.format(command))
+ sys.stderr.write('RUN [{0}]\n'.format(command))
sys.stderr.flush()
- return os.system(command)
+ try:
+ subprocess.check_call(command, shell=True)
+ except subprocess.CalledProcessError as exc:
+ raise ActionFailed("ACTION [{0}] failed: {1}"
+ .format(args.action, exc))
+
+
+class ActionFailed(Exception):
+ """
+ Raised when action failed.
+ """
+ pass
if __name__ == '__main__':
"""
- User command to invoke ansible playbooks in a "known" environment
+ User command to invoke ansible playbooks in a "known" configuration
Reads ~/.openshift-ansible for default configuration items
[DEFAULT]
@@ -215,7 +282,14 @@ if __name__ == '__main__':
providers = gce,aws,libvirt,openstack
"""
- environment = ConfigParser.SafeConfigParser({
+ warning = ("================================================================================\n"
+ "ATTENTION: You are running a community supported utility that has not been\n"
+ "tested by Red Hat. Visit https://docs.openshift.com for supported installation\n"
+ "instructions.\n"
+ "================================================================================\n\n")
+ sys.stderr.write(warning)
+
+ cluster_config = ConfigParser.SafeConfigParser({
'cluster_ids': 'marketing,sales',
'validate_cluster_ids': 'False',
'providers': 'gce,aws,libvirt,openstack',
@@ -223,33 +297,49 @@ if __name__ == '__main__':
path = os.path.expanduser("~/.openshift-ansible")
if os.path.isfile(path):
- environment.read(path)
+ cluster_config.read(path)
cluster = Cluster()
parser = argparse.ArgumentParser(
- description='Python wrapper to ensure proper environment for OpenShift ansible playbooks',
+ formatter_class=argparse.RawDescriptionHelpFormatter,
+ description='Python wrapper to ensure proper configuration for OpenShift ansible playbooks',
+ epilog='''\
+This wrapper is overriding the following ansible variables:
+
+ * ANSIBLE_SSH_ARGS:
+ If not set in the environment, this wrapper will use the following value:
+ `-o ForwardAgent=yes -o StrictHostKeyChecking=no -o UserKnownHostsFile=/dev/null -o ControlMaster=auto -o ControlPersist=600s`
+ If set in the environment, the environment variable value is left untouched and used.
+
+ * ANSIBLE_SSH_PIPELINING:
+ If not set in the environment, this wrapper will set it to `True`.
+ If you experience issues with Ansible SSH pipelining, you can disable it by explicitly setting this environment variable to `False`.
+'''
)
parser.add_argument('-v', '--verbose', action='count',
help='Multiple -v options increase the verbosity')
parser.add_argument('--version', action='version', version='%(prog)s 0.3')
meta_parser = argparse.ArgumentParser(add_help=False)
- providers = environment.get('DEFAULT', 'providers').split(',')
+ providers = cluster_config.get('DEFAULT', 'providers').split(',')
meta_parser.add_argument('provider', choices=providers, help='provider')
- if environment.get('DEFAULT', 'validate_cluster_ids').lower() in ("yes", "true", "1"):
- meta_parser.add_argument('cluster_id', choices=environment.get('DEFAULT', 'cluster_ids').split(','),
+ if cluster_config.get('DEFAULT', 'validate_cluster_ids').lower() in ("yes", "true", "1"):
+ meta_parser.add_argument('cluster_id', choices=cluster_config.get('DEFAULT', 'cluster_ids').split(','),
help='prefix for cluster VM names')
else:
meta_parser.add_argument('cluster_id', help='prefix for cluster VM names')
meta_parser.add_argument('-t', '--deployment-type',
- choices=['origin', 'online', 'enterprise'],
+ choices=['origin', 'atomic-enterprise', 'openshift-enterprise'],
help='Deployment type. (default: origin)')
meta_parser.add_argument('-o', '--option', action='append',
help='options')
+ meta_parser.add_argument('--env', default='dev', type=str,
+ help='environment for the cluster. Defaults to \'dev\'.')
+
meta_parser.add_argument('-p', '--profile', action='store_true',
help='Enable playbook profiling')
@@ -258,14 +348,28 @@ if __name__ == '__main__':
create_parser = action_parser.add_parser('create', help='Create a cluster',
parents=[meta_parser])
+ create_parser.add_argument('-c', '--cloudprovider', action='store_true',
+ help='Enable the cloudprovider')
create_parser.add_argument('-m', '--masters', default=1, type=int,
help='number of masters to create in cluster')
create_parser.add_argument('-n', '--nodes', default=2, type=int,
help='number of nodes to create in cluster')
+ create_parser.add_argument('-i', '--infra', default=1, type=int,
+ help='number of infra nodes to create in cluster')
create_parser.add_argument('-e', '--etcd', default=0, type=int,
help='number of external etcd hosts to create in cluster')
create_parser.set_defaults(func=cluster.create)
+
+ create_parser = action_parser.add_parser('add-nodes', help='Add nodes to a cluster',
+ parents=[meta_parser])
+ create_parser.add_argument('-n', '--nodes', default=1, type=int,
+ help='number of nodes to add to the cluster')
+ create_parser.add_argument('-i', '--infra', default=1, type=int,
+ help='number of infra nodes to add to the cluster')
+ create_parser.set_defaults(func=cluster.add_nodes)
+
+
config_parser = action_parser.add_parser('config',
help='Configure or reconfigure a cluster',
parents=[meta_parser])
@@ -299,26 +403,23 @@ if __name__ == '__main__':
args = parser.parse_args()
if 'terminate' == args.action and not args.force:
- answer = raw_input("This will destroy the ENTIRE {} environment. Are you sure? [y/N] ".format(args.cluster_id))
+ answer = raw_input("This will destroy the ENTIRE {0} cluster. Are you sure? [y/N] ".format(args.cluster_id))
if answer not in ['y', 'Y']:
sys.stderr.write('\nACTION [terminate] aborted by user!\n')
exit(1)
if 'update' == args.action and not args.force:
answer = raw_input(
- "This is destructive and could corrupt {} environment. Continue? [y/N] ".format(args.cluster_id))
+ "This is destructive and could corrupt {0} cluster. Continue? [y/N] ".format(args.cluster_id))
if answer not in ['y', 'Y']:
sys.stderr.write('\nACTION [update] aborted by user!\n')
exit(1)
- status = 1
try:
- status = args.func(args)
- if status != 0:
- sys.stderr.write("ACTION [{}] failed with exit status {}\n".format(args.action, status))
- except Exception, e:
+ args.func(args)
+ except Exception as exc:
if args.verbose:
traceback.print_exc(file=sys.stderr)
else:
- sys.stderr.write("{}\n".format(e))
- exit(status)
+ print >>sys.stderr, exc
+ exit(1)
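
For reference, the change above replaces the `os.system` call with `subprocess.check_call` plus a dedicated `ActionFailed` exception, so a failed playbook run surfaces as an exception instead of an ignored return code. A minimal sketch of just that pattern, with an illustrative command string (not the wrapper's real invocation):

import subprocess
import sys

class ActionFailed(Exception):
    """Raised when the wrapped command exits non-zero."""

def run(command, action):
    # check_call raises CalledProcessError on a non-zero exit status,
    # so failures propagate as exceptions instead of silent status codes.
    try:
        subprocess.check_call(command, shell=True)
    except subprocess.CalledProcessError as exc:
        raise ActionFailed("ACTION [{0}] failed: {1}".format(action, exc))

if __name__ == '__main__':
    try:
        run('ansible-playbook --version', 'version')  # illustrative command
    except ActionFailed as exc:
        sys.stderr.write('{0}\n'.format(exc))
        sys.exit(1)

Note that, per the changes above, the wrapper also exports ANSIBLE_HOST_KEY_CHECKING=False and, unless already set, ANSIBLE_SSH_PIPELINING=True before running; setting ANSIBLE_SSH_PIPELINING=False in the environment is the documented way to opt out of pipelining.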
diff --git a/bin/ohi b/bin/ohi
deleted file mode 100755
index d679edcfb..000000000
--- a/bin/ohi
+++ /dev/null
@@ -1,114 +0,0 @@
-#!/usr/bin/env python
-# vim: expandtab:tabstop=4:shiftwidth=4
-
-import argparse
-import traceback
-import sys
-import os
-import re
-import tempfile
-import time
-import subprocess
-import ConfigParser
-
-from openshift_ansible import awsutil
-from openshift_ansible import utils
-from openshift_ansible.awsutil import ArgumentError
-
-CONFIG_MAIN_SECTION = 'main'
-CONFIG_HOST_TYPE_ALIAS_SECTION = 'host_type_aliases'
-
-
-class Ohi(object):
- def __init__(self):
- self.host_type_aliases = {}
- self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
-
- # Default the config path to /etc
- self.config_path = os.path.join(os.path.sep, 'etc', \
- 'openshift_ansible', \
- 'openshift_ansible.conf')
-
- self.parse_cli_args()
- self.parse_config_file()
-
- self.aws = awsutil.AwsUtil(self.host_type_aliases)
-
- def run(self):
- if self.args.list_host_types:
- self.aws.print_host_types()
- return 0
-
- hosts = None
- if self.args.host_type is not None and \
- self.args.env is not None:
- # Both env and host-type specified
- hosts = self.aws.get_host_list(host_type=self.args.host_type, \
- envs=self.args.env)
-
- if self.args.host_type is None and \
- self.args.env is not None:
- # Only env specified
- hosts = self.aws.get_host_list(envs=self.args.env)
-
- if self.args.host_type is not None and \
- self.args.env is None:
- # Only host-type specified
- hosts = self.aws.get_host_list(host_type=self.args.host_type)
-
- if hosts is None:
- # We weren't able to determine what they wanted to do
- raise ArgumentError("Invalid combination of arguments")
-
- for host in sorted(hosts, key=utils.normalize_dnsname):
- if self.args.user:
- print "%s@%s" % (self.args.user, host)
- else:
- print host
-
- return 0
-
- def parse_config_file(self):
- if os.path.isfile(self.config_path):
- config = ConfigParser.ConfigParser()
- config.read(self.config_path)
-
- self.host_type_aliases = {}
- if config.has_section(CONFIG_HOST_TYPE_ALIAS_SECTION):
- for alias in config.options(CONFIG_HOST_TYPE_ALIAS_SECTION):
- value = config.get(CONFIG_HOST_TYPE_ALIAS_SECTION, alias).split(',')
- self.host_type_aliases[alias] = value
-
- def parse_cli_args(self):
- """Setup the command line parser with the options we want
- """
-
- parser = argparse.ArgumentParser(description='OpenShift Host Inventory')
-
- parser.add_argument('--list-host-types', default=False, action='store_true',
- help='List all of the host types')
-
- parser.add_argument('-e', '--env', action="store",
- help="Which environment to use")
-
- parser.add_argument('-t', '--host-type', action="store",
- help="Which host type to use")
-
- parser.add_argument('-l', '--user', action='store', default=None,
- help='username')
-
-
- self.args = parser.parse_args()
-
-
-if __name__ == '__main__':
- if len(sys.argv) == 1:
- print "\nError: No options given. Use --help to see the available options\n"
- sys.exit(0)
-
- try:
- ohi = Ohi()
- exitcode = ohi.run()
- sys.exit(exitcode)
- except ArgumentError as e:
- print "\nError: %s\n" % e.message
diff --git a/bin/openshift-ansible-bin.spec b/bin/openshift-ansible-bin.spec
deleted file mode 100644
index fd2386c9a..000000000
--- a/bin/openshift-ansible-bin.spec
+++ /dev/null
@@ -1,104 +0,0 @@
-Summary: OpenShift Ansible Scripts for working with metadata hosts
-Name: openshift-ansible-bin
-Version: 0.0.18
-Release: 1%{?dist}
-License: ASL 2.0
-URL: https://github.com/openshift/openshift-ansible
-Source0: %{name}-%{version}.tar.gz
-Requires: python2, openshift-ansible-inventory
-BuildRequires: python2-devel
-BuildArch: noarch
-
-%description
-Scripts to make it nicer when working with hosts that are defined only by metadata.
-
-%prep
-%setup -q
-
-%build
-
-%install
-mkdir -p %{buildroot}%{_bindir}
-mkdir -p %{buildroot}%{python_sitelib}/openshift_ansible
-mkdir -p %{buildroot}/etc/bash_completion.d
-mkdir -p %{buildroot}/etc/openshift_ansible
-
-cp -p ossh oscp opssh opscp ohi %{buildroot}%{_bindir}
-cp -pP openshift_ansible/* %{buildroot}%{python_sitelib}/openshift_ansible
-
-# Make it so we can load multi_ec2.py as a library.
-rm %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.py*
-ln -sf /usr/share/ansible/inventory/multi_ec2.py %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.py
-ln -sf /usr/share/ansible/inventory/multi_ec2.pyc %{buildroot}%{python_sitelib}/openshift_ansible/multi_ec2.pyc
-
-cp -p ossh_bash_completion %{buildroot}/etc/bash_completion.d
-
-cp -p openshift_ansible.conf.example %{buildroot}/etc/openshift_ansible/openshift_ansible.conf
-
-%files
-%{_bindir}/*
-%{python_sitelib}/openshift_ansible/
-/etc/bash_completion.d/*
-%config(noreplace) /etc/openshift_ansible/
-
-%changelog
-* Tue Jun 09 2015 Kenny Woodson <kwoodson@redhat.com> 0.0.18-1
-- Implement OpenStack provider (lhuard@amadeus.com)
-- * Update defaults and examples to track core concepts guide
- (jhonce@redhat.com)
-- Issue 119 - Add support for ~/.openshift-ansible (jhonce@redhat.com)
-- Infrastructure - Add service action to bin/cluster (jhonce@redhat.com)
-
-* Fri May 15 2015 Thomas Wiest <twiest@redhat.com> 0.0.17-1
-- fixed the openshift-ansible-bin build (twiest@redhat.com)
-
-* Fri May 15 2015 Thomas Wiest <twiest@redhat.com> 0.0.14-1
-- Command line tools import multi_ec2 as lib (kwoodson@redhat.com)
-- Adding cache location for multi ec2 (kwoodson@redhat.com)
-* Thu May 07 2015 Thomas Wiest <twiest@redhat.com> 0.0.13-1
-- added '-e all' to ohi and fixed pylint errors. (twiest@redhat.com)
-
-* Tue May 05 2015 Thomas Wiest <twiest@redhat.com> 0.0.12-1
-- fixed opssh and opscp to allow just environment or just host-type.
- (twiest@redhat.com)
-
-* Mon May 04 2015 Thomas Wiest <twiest@redhat.com> 0.0.11-1
-- changed opssh to a bash script using ohi to make it easier to maintain, and
- to expose all of the pssh features directly. (twiest@redhat.com)
-- Added --user option to ohi to pre-pend the username in the hostlist output.
- (twiest@redhat.com)
-- Added utils.py that contains a normalize_dnsname function good for sorting
- dns names to a human readable list. (twiest@redhat.com)
-
-* Thu Apr 30 2015 Thomas Wiest <twiest@redhat.com> 0.0.10-1
-- added --list-host-types option to opscp (twiest@redhat.com)
-
-* Thu Apr 30 2015 Thomas Wiest <twiest@redhat.com> 0.0.9-1
-- added opscp (twiest@redhat.com)
-* Mon Apr 13 2015 Thomas Wiest <twiest@redhat.com> 0.0.8-1
-- fixed bug in opssh where it wouldn't actually run pssh (twiest@redhat.com)
-
-* Mon Apr 13 2015 Thomas Wiest <twiest@redhat.com> 0.0.7-1
-- added the ability to run opssh and ohi on all hosts in an environment, as
- well as all hosts of the same host-type regardless of environment
- (twiest@redhat.com)
-- added ohi (twiest@redhat.com)
-* Thu Apr 09 2015 Thomas Wiest <twiest@redhat.com> 0.0.6-1
-- fixed bug where opssh would throw an exception if pssh returned a non-zero
- exit code (twiest@redhat.com)
-
-* Wed Apr 08 2015 Thomas Wiest <twiest@redhat.com> 0.0.5-1
-- fixed the opssh default output behavior to be consistent with pssh. Also
- fixed a bug in how directories are named for --outdir and --errdir.
- (twiest@redhat.com)
-* Tue Mar 31 2015 Thomas Wiest <twiest@redhat.com> 0.0.4-1
-- Fixed when tag was missing and added opssh completion (kwoodson@redhat.com)
-
-* Mon Mar 30 2015 Thomas Wiest <twiest@redhat.com> 0.0.3-1
-- created a python package named openshift_ansible (twiest@redhat.com)
-
-* Mon Mar 30 2015 Thomas Wiest <twiest@redhat.com> 0.0.2-1
-- added config file support to opssh, ossh, and oscp (twiest@redhat.com)
-* Tue Mar 24 2015 Thomas Wiest <twiest@redhat.com> 0.0.1-1
-- new package built with tito
-
diff --git a/bin/openshift_ansible.conf.example b/bin/openshift_ansible.conf.example
deleted file mode 100644
index e891b855a..000000000
--- a/bin/openshift_ansible.conf.example
+++ /dev/null
@@ -1,6 +0,0 @@
-#[main]
-#inventory = /usr/share/ansible/inventory/multi_ec2.py
-
-#[host_type_aliases]
-#host-type-one = aliasa,aliasb
-#host-type-two = aliasfortwo
diff --git a/bin/openshift_ansible/__init__.py b/bin/openshift_ansible/__init__.py
deleted file mode 100644
index e69de29bb..000000000
--- a/bin/openshift_ansible/__init__.py
+++ /dev/null
diff --git a/bin/openshift_ansible/awsutil.py b/bin/openshift_ansible/awsutil.py
deleted file mode 100644
index 9df034f57..000000000
--- a/bin/openshift_ansible/awsutil.py
+++ /dev/null
@@ -1,208 +0,0 @@
-# vim: expandtab:tabstop=4:shiftwidth=4
-
-"""This module comprises Aws specific utility functions."""
-
-import os
-import re
-from openshift_ansible import multi_ec2
-
-class ArgumentError(Exception):
- """This class is raised when improper arguments are passed."""
-
- def __init__(self, message):
- """Initialize an ArgumentError.
-
- Keyword arguments:
- message -- the exact error message being raised
- """
- super(ArgumentError, self).__init__()
- self.message = message
-
-class AwsUtil(object):
- """This class contains the AWS utility functions."""
-
- def __init__(self, host_type_aliases=None):
- """Initialize the AWS utility class.
-
- Keyword arguments:
- host_type_aliases -- a list of aliases to common host-types (e.g. ex-node)
- """
-
- host_type_aliases = host_type_aliases or {}
-
- self.host_type_aliases = host_type_aliases
- self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
-
- self.setup_host_type_alias_lookup()
-
- def setup_host_type_alias_lookup(self):
- """Sets up the alias to host-type lookup table."""
- self.alias_lookup = {}
- for key, values in self.host_type_aliases.iteritems():
- for value in values:
- self.alias_lookup[value] = key
-
- @staticmethod
- def get_inventory(args=None):
- """Calls the inventory script and returns a dictionary containing the inventory."
-
- Keyword arguments:
- args -- optional arguments to pass to the inventory script
- """
- mec2 = multi_ec2.MultiEc2(args)
- mec2.run()
- return mec2.result
-
- def get_environments(self):
- """Searches for env tags in the inventory and returns all of the envs found."""
- pattern = re.compile(r'^tag_environment_(.*)')
-
- envs = []
- inv = self.get_inventory()
- for key in inv.keys():
- matched = pattern.match(key)
- if matched:
- envs.append(matched.group(1))
-
- envs.sort()
- return envs
-
- def get_host_types(self):
- """Searches for host-type tags in the inventory and returns all host-types found."""
- pattern = re.compile(r'^tag_host-type_(.*)')
-
- host_types = []
- inv = self.get_inventory()
- for key in inv.keys():
- matched = pattern.match(key)
- if matched:
- host_types.append(matched.group(1))
-
- host_types.sort()
- return host_types
-
- def get_security_groups(self):
- """Searches for security_groups in the inventory and returns all SGs found."""
- pattern = re.compile(r'^security_group_(.*)')
-
- groups = []
- inv = self.get_inventory()
- for key in inv.keys():
- matched = pattern.match(key)
- if matched:
- groups.append(matched.group(1))
-
- groups.sort()
- return groups
-
- def build_host_dict_by_env(self, args=None):
- """Searches the inventory for hosts in an env and returns their hostvars."""
- args = args or []
- inv = self.get_inventory(args)
-
- inst_by_env = {}
- for _, host in inv['_meta']['hostvars'].items():
- # If you don't have an environment tag, we're going to ignore you
- if 'ec2_tag_environment' not in host:
- continue
-
- if host['ec2_tag_environment'] not in inst_by_env:
- inst_by_env[host['ec2_tag_environment']] = {}
- host_id = "%s:%s" % (host['ec2_tag_Name'], host['ec2_id'])
- inst_by_env[host['ec2_tag_environment']][host_id] = host
-
- return inst_by_env
-
- def print_host_types(self):
- """Gets the list of host types and aliases and outputs them in columns."""
- host_types = self.get_host_types()
- ht_format_str = "%35s"
- alias_format_str = "%-20s"
- combined_format_str = ht_format_str + " " + alias_format_str
-
- print
- print combined_format_str % ('Host Types', 'Aliases')
- print combined_format_str % ('----------', '-------')
-
- for host_type in host_types:
- aliases = []
- if host_type in self.host_type_aliases:
- aliases = self.host_type_aliases[host_type]
- print combined_format_str % (host_type, ", ".join(aliases))
- else:
- print ht_format_str % host_type
- print
-
- def resolve_host_type(self, host_type):
- """Converts a host-type alias into a host-type.
-
- Keyword arguments:
- host_type -- The alias or host_type to look up.
-
- Example (depends on aliases defined in config file):
- host_type = ex-node
- returns: openshift-node
- """
- if self.alias_lookup.has_key(host_type):
- return self.alias_lookup[host_type]
- return host_type
-
- @staticmethod
- def gen_env_tag(env):
- """Generate the environment tag
- """
- return "tag_environment_%s" % env
-
- def gen_host_type_tag(self, host_type):
- """Generate the host type tag
- """
- host_type = self.resolve_host_type(host_type)
- return "tag_host-type_%s" % host_type
-
- def gen_env_host_type_tag(self, host_type, env):
- """Generate the environment host type tag
- """
- host_type = self.resolve_host_type(host_type)
- return "tag_env-host-type_%s-%s" % (env, host_type)
-
- def get_host_list(self, host_type=None, envs=None):
- """Get the list of hosts from the inventory using host-type and environment
- """
- envs = envs or []
- inv = self.get_inventory()
-
- # We prefer to deal with a list of environments
- if issubclass(type(envs), basestring):
- if envs == 'all':
- envs = self.get_environments()
- else:
- envs = [envs]
-
- if host_type and envs:
- # Both host type and environment were specified
- retval = []
- for env in envs:
- env_host_type_tag = self.gen_env_host_type_tag(host_type, env)
- if env_host_type_tag in inv.keys():
- retval += inv[env_host_type_tag]
- return set(retval)
-
- if envs and not host_type:
- # Just environment was specified
- retval = []
- for env in envs:
- env_tag = AwsUtil.gen_env_tag(env)
- if env_tag in inv.keys():
- retval += inv[env_tag]
- return set(retval)
-
- if host_type and not envs:
- # Just host-type was specified
- retval = []
- host_type_tag = self.gen_host_type_tag(host_type)
- if host_type_tag in inv.keys():
- retval = inv[host_type_tag]
- return set(retval)
-
- # We should never reach here!
- raise ArgumentError("Invalid combination of parameters")
diff --git a/bin/openshift_ansible/multi_ec2.py b/bin/openshift_ansible/multi_ec2.py
deleted file mode 120000
index 660a0418e..000000000
--- a/bin/openshift_ansible/multi_ec2.py
+++ /dev/null
@@ -1 +0,0 @@
-../../inventory/multi_ec2.py \ No newline at end of file
diff --git a/bin/openshift_ansible/utils.py b/bin/openshift_ansible/utils.py
deleted file mode 100644
index e6243aa5a..000000000
--- a/bin/openshift_ansible/utils.py
+++ /dev/null
@@ -1,30 +0,0 @@
-#!/usr/bin/env python
-# vim: expandtab:tabstop=4:shiftwidth=4
-
-''' The purpose of this module is to contain small utility functions.
-'''
-
-import re
-
-def normalize_dnsname(name, padding=10):
- ''' The purpose of this function is to return a dns name with zero padding,
- so that it sorts properly (as a human would expect).
-
- Example: name=ex-lrg-node10.prod.rhcloud.com
- Returns: ex-lrg-node0000000010.prod.rhcloud.com
-
- Example Usage:
- sorted(['a3.example.com', 'a10.example.com', 'a1.example.com'],
- key=normalize_dnsname)
-
- Returns: ['a1.example.com', 'a3.example.com', 'a10.example.com']
- '''
- parts = re.split(r'(\d+)', name)
- retval = []
- for part in parts:
- if re.match(r'^\d+$', part):
- retval.append(part.zfill(padding))
- else:
- retval.append(part)
-
- return ''.join(retval)
diff --git a/bin/opscp b/bin/opscp
deleted file mode 100755
index 391cb6696..000000000
--- a/bin/opscp
+++ /dev/null
@@ -1,131 +0,0 @@
-#!/bin/bash
-# vim: expandtab:tabstop=4:shiftwidth=4
-
-
-function usage() {
- cat << EOF
-Usage: opscp [OPTIONS] local remote
-
-Options:
- --version show program's version number and exit
- --help show this help message and exit
- -l USER, --user=USER username (OPTIONAL)
- -p PAR, --par=PAR max number of parallel threads (OPTIONAL)
- --outdir=OUTDIR output directory for stdout files (OPTIONAL)
- --errdir=ERRDIR output directory for stderr files (OPTIONAL)
- -e ENV, --env ENV which environment to use
- -t HOST_TYPE, --host-type HOST_TYPE
- which host type to use
- --list-host-types list all of the host types
- --timeout=TIMEOUT timeout (secs) (0 = no timeout) per host (OPTIONAL)
- -O OPTION, --option=OPTION
- SSH option (OPTIONAL)
- -v, --verbose turn on warning and diagnostic messages (OPTIONAL)
- -A, --askpass Ask for a password (OPTIONAL)
- -x ARGS, --extra-args=ARGS
- Extra command-line arguments, with processing for
- spaces, quotes, and backslashes
- -X ARG, --extra-arg=ARG
- Extra command-line argument
- -r, --recursive recursively copy directories (OPTIONAL)
-
-Example: opscp -t ex-srv -e stg -l irb2 foo.txt /home/irb2/foo.txt
-
-EOF
-}
-
-if [ $# -eq 0 ] || [ "$1" == "--help" ]
-then
- usage
- exit 1
-fi
-
-# See if ohi is installed
-if ! which ohi &>/dev/null ; then
- echo "ERROR: can't find ohi (OpenShift Host Inventory) on your system, please either install the openshift-ansible-bin package, or add openshift-ansible/bin to your path."
-
- exit 10
-fi
-
-PAR=200
-USER=root
-TIMEOUT=0
-ENV=""
-HOST_TYPE=""
-
-while [ $# -gt 0 ] ; do
- case $1 in
- -t|--host-type)
- shift # get past the option
- HOST_TYPE=$1
- shift # get past the value of the option
- ;;
-
- -e)
- shift # get past the option
- ENV=$1
- shift # get past the value of the option
- ;;
-
- --timeout)
- shift # get past the option
- TIMEOUT=$1
- shift # get past the value of the option
- ;;
-
- -p|--par)
- shift # get past the option
- PAR=$1
- shift # get past the value of the option
- ;;
-
- -l|--user)
- shift # get past the option
- USER=$1
- shift # get past the value of the option
- ;;
-
- --list-host-types)
- ohi --list-host-types
- exit 0
- ;;
-
- -h|--hosts|-H|--host|-o)
- echo "ERROR: unknown option $1"
- exit 20
- ;;
-
- *)
- args+=("$1")
- shift
- ;;
- esac
-done
-
-# Get host list from ohi
-if [ -n "$ENV" -a -n "$HOST_TYPE" ] ; then
- HOSTS="$(ohi -t "$HOST_TYPE" -e "$ENV" 2>/dev/null)"
- OHI_ECODE=$?
-elif [ -n "$ENV" ] ; then
- HOSTS="$(ohi -e "$ENV" 2>/dev/null)"
- OHI_ECODE=$?
-elif [ -n "$HOST_TYPE" ] ; then
- HOSTS="$(ohi -t "$HOST_TYPE" 2>/dev/null)"
- OHI_ECODE=$?
-else
- echo
- echo "Error: either -e or -t must be specified"
- echo
- exit 10
-fi
-
-if [ $OHI_ECODE -ne 0 ] ; then
- echo
- echo "ERROR: ohi failed with exit code $OHI_ECODE"
- echo
- echo "This is usually caused by a bad value passed for host-type or environment."
- echo
- exit 25
-fi
-
-exec pscp.pssh -t $TIMEOUT -p $PAR -l $USER -h <(echo "$HOSTS") "${args[@]}"
diff --git a/bin/opssh b/bin/opssh
deleted file mode 100755
index 8ac526049..000000000
--- a/bin/opssh
+++ /dev/null
@@ -1,134 +0,0 @@
-#!/bin/bash
-# vim: expandtab:tabstop=4:shiftwidth=4
-
-
-function usage() {
- cat << EOF
-Usage: opssh [OPTIONS] command [...]
-
-Options:
- --version show program's version number and exit
- --help show this help message and exit
- -l USER, --user=USER username (OPTIONAL)
- -p PAR, --par=PAR max number of parallel threads (OPTIONAL)
- --outdir=OUTDIR output directory for stdout files (OPTIONAL)
- --errdir=ERRDIR output directory for stderr files (OPTIONAL)
- -e ENV, --env ENV which environment to use
- -t HOST_TYPE, --host-type HOST_TYPE
- which host type to use
- --list-host-types list all of the host types
- --timeout=TIMEOUT timeout (secs) (0 = no timeout) per host (OPTIONAL)
- -O OPTION, --option=OPTION
- SSH option (OPTIONAL)
- -v, --verbose turn on warning and diagnostic messages (OPTIONAL)
- -A, --askpass Ask for a password (OPTIONAL)
- -x ARGS, --extra-args=ARGS
- Extra command-line arguments, with processing for
- spaces, quotes, and backslashes
- -X ARG, --extra-arg=ARG
- Extra command-line argument
- -i, --inline inline aggregated output and error for each server
- --inline-stdout inline standard output for each server
- -I, --send-input read from standard input and send as input to ssh
- -P, --print print output as we get it
-
-Example: opssh -t ex-srv -e stg -l irb2 --outdir /tmp/foo uptime
-
-EOF
-}
-
-if [ $# -eq 0 ] || [ "$1" == "--help" ]
-then
- usage
- exit 1
-fi
-
-# See if ohi is installed
-if ! which ohi &>/dev/null ; then
- echo "ERROR: can't find ohi (OpenShift Host Inventory) on your system, please either install the openshift-ansible-bin package, or add openshift-ansible/bin to your path."
-
- exit 10
-fi
-
-PAR=200
-USER=root
-TIMEOUT=0
-ARGS=()
-ENV=""
-HOST_TYPE=""
-while [ $# -gt 0 ] ; do
- case $1 in
- -t|--host-type)
- shift # get past the option
- HOST_TYPE=$1
- shift # get past the value of the option
- ;;
-
- -e)
- shift # get past the option
- ENV=$1
- shift # get past the value of the option
- ;;
-
- --timeout)
- shift # get past the option
- TIMEOUT=$1
- shift # get past the value of the option
- ;;
-
- -p|--par)
- shift # get past the option
- PAR=$1
- shift # get past the value of the option
- ;;
-
- -l|--user)
- shift # get past the option
- USER=$1
- shift # get past the value of the option
- ;;
-
- --list-host-types)
- ohi --list-host-types
- exit 0
- ;;
-
- -h|--hosts|-H|--host|-o)
- echo "ERROR: unknown option $1"
- exit 20
- ;;
-
- *)
- args+=("$1")
- shift
- ;;
- esac
-done
-
-# Get host list from ohi
-if [ -n "$ENV" -a -n "$HOST_TYPE" ] ; then
- HOSTS="$(ohi -t "$HOST_TYPE" -e "$ENV" 2>/dev/null)"
- OHI_ECODE=$?
-elif [ -n "$ENV" ] ; then
- HOSTS="$(ohi -e "$ENV" 2>/dev/null)"
- OHI_ECODE=$?
-elif [ -n "$HOST_TYPE" ] ; then
- HOSTS="$(ohi -t "$HOST_TYPE" 2>/dev/null)"
- OHI_ECODE=$?
-else
- echo
- echo "Error: either -e or -t must be specified"
- echo
- exit 10
-fi
-
-if [ $OHI_ECODE -ne 0 ] ; then
- echo
- echo "ERROR: ohi failed with exit code $OHI_ECODE"
- echo
- echo "This is usually caused by a bad value passed for host-type or environment."
- echo
- exit 25
-fi
-
-exec pssh -t $TIMEOUT -p $PAR -l $USER -h <(echo "$HOSTS") "${args[@]}"
diff --git a/bin/oscp b/bin/oscp
deleted file mode 100755
index 91fc45cd3..000000000
--- a/bin/oscp
+++ /dev/null
@@ -1,238 +0,0 @@
-#!/usr/bin/env python2
-# vim: expandtab:tabstop=4:shiftwidth=4
-
-import argparse
-import traceback
-import sys
-import os
-import re
-import ConfigParser
-
-from openshift_ansible import awsutil
-
-CONFIG_MAIN_SECTION = 'main'
-
-class Oscp(object):
- def __init__(self):
- self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
-
- # Default the config path to /etc
- self.config_path = os.path.join(os.path.sep, 'etc', \
- 'openshift_ansible', \
- 'openshift_ansible.conf')
-
- self.parse_cli_args()
- self.parse_config_file()
-
- # parse host and user
- self.process_host()
-
- self.aws = awsutil.AwsUtil()
-
- # get a dict of host inventory
- if self.args.refresh_cache:
- self.get_hosts(True)
- else:
- self.get_hosts()
-
- if (self.args.src == '' or self.args.dest == '') and not self.args.list:
- self.parser.print_help()
- return
-
- if self.args.debug:
- print self.host
- print self.args
-
- # perform the scp
- if self.args.list:
- self.list_hosts()
- else:
- self.scp()
-
- def parse_config_file(self):
- if os.path.isfile(self.config_path):
- config = ConfigParser.ConfigParser()
- config.read(self.config_path)
-
- def parse_cli_args(self):
- parser = argparse.ArgumentParser(description='OpenShift Online SSH Tool.')
- parser.add_argument('-e', '--env',
- action="store", help="Environment where this server exists.")
- parser.add_argument('-d', '--debug', default=False,
- action="store_true", help="debug mode")
- parser.add_argument('-v', '--verbose', default=False,
- action="store_true", help="Verbose?")
- parser.add_argument('--refresh-cache', default=False,
- action="store_true", help="Force a refresh on the host cache.")
- parser.add_argument('--list', default=False,
- action="store_true", help="list out hosts")
- parser.add_argument('-r', '--recurse', action='store_true', default=False,
- help='Recursively copy files to or from destination.')
- parser.add_argument('-o', '--ssh_opts', action='store',
- help='options to pass to SSH.\n \
- "-oPort=22,TCPKeepAlive=yes"')
-
- parser.add_argument('src', nargs='?', default='')
- parser.add_argument('dest',nargs='?', default='')
-
- self.args = parser.parse_args()
- self.parser = parser
-
-
- def process_host(self):
- '''Determine host name and user name for SSH.
- '''
- self.user = ''
-
- # is the first param passed a valid file?
- if os.path.isfile(self.args.src) or os.path.isdir(self.args.src):
- self.local_src = True
- self.host = self.args.dest
- else:
- self.local_src = False
- self.host = self.args.src
-
- if '@' in self.host:
- re_host = re.compile("(.*@)(.*)(:.*$)")
- else:
- re_host = re.compile("(.*)(:.*$)")
-
- search = re_host.search(self.host)
-
- if search:
- if len(search.groups()) > 2:
- self.user = search.groups()[0]
- self.host = search.groups()[1]
- self.path = search.groups()[2]
- else:
- self.host = search.groups()[0]
- self.path = search.groups()[1]
-
- if self.args.env:
- self.env = self.args.env
- elif "." in self.host:
- self.host, self.env = self.host.split(".")
- else:
- self.env = None
-
- def get_hosts(self, refresh_cache=False):
- '''Query our host inventory and return a dict where the format
- equals:
-
- dict['environment'] = [{'servername' : {}}, ]
- '''
- if refresh_cache:
- self.host_inventory = self.aws.build_host_dict_by_env(['--refresh-cache'])
- else:
- self.host_inventory = self.aws.build_host_dict_by_env()
-
- def select_host(self):
- '''select host attempts to match the host specified
- on the command line with a list of hosts.
- '''
- results = []
- for env in self.host_inventory.keys():
- for hostname, server_info in self.host_inventory[env].items():
- if hostname.split(':')[0] == self.host:
- results.append((hostname, server_info))
-
- # attempt to select the correct environment if specified
- if self.env:
- results = filter(lambda result: result[1]['ec2_tag_environment'] == self.env, results)
-
- if results:
- return results
- else:
- print "Could not find specified host: %s." % self.host
-
- # default - no results found.
- return None
-
- def list_hosts(self, limit=None):
- '''Function to print out the host inventory.
-
- Takes a single parameter to limit the number of hosts printed.
- '''
-
- if self.env:
- results = self.select_host()
- if len(results) == 1:
- hostname, server_info = results[0]
- sorted_keys = server_info.keys()
- sorted_keys.sort()
- for key in sorted_keys:
- print '{0:<35} {1}'.format(key, server_info[key])
- else:
- for host_id, server_info in results[:limit]:
- name = server_info['ec2_tag_Name']
- ec2_id = server_info['ec2_id']
- ip = server_info['ec2_ip_address']
- print '{ec2_tag_Name:<35} {ec2_tag_environment:<8} {ec2_id:<15} {ec2_ip_address}'.format(**server_info)
-
- if limit:
- print
- print 'Showing only the first %d results...' % limit
- print
-
- else:
- for env, host_ids in self.host_inventory.items():
- for host_id, server_info in host_ids.items():
- name = server_info['ec2_tag_Name']
- ec2_id = server_info['ec2_id']
- ip = server_info['ec2_ip_address']
- print '{ec2_tag_Name:<35} {ec2_tag_environment:<5} {ec2_id:<15} {ec2_ip_address}'.format(**server_info)
-
- def scp(self):
- '''scp files to or from a specified host
- '''
- try:
- # shell args start with the program name in position 1
- scp_args = ['/usr/bin/scp']
-
- if self.args.verbose:
- scp_args.append('-v')
-
- if self.args.recurse:
- scp_args.append('-r')
-
- if self.args.ssh_opts:
- for arg in self.args.ssh_opts.split(","):
- scp_args.append("-o%s" % arg)
-
- results = self.select_host()
-
- if self.args.debug: print results
-
- if not results:
- return # early exit, no results
-
- if len(results) > 1:
- print "Multiple results found for %s." % self.host
- for result in results:
- print "{ec2_tag_Name:<35} {ec2_tag_environment:<5} {ec2_id:<10}".format(**result[1])
- return # early exit, too many results
-
- # Assume we have one and only one.
- hostname, server_info = results[0]
- dns = server_info['ec2_public_dns_name']
-
- host_str = "%s%s%s" % (self.user, dns, self.path)
-
- if self.local_src:
- scp_args.append(self.args.src)
- scp_args.append(host_str)
- else:
- scp_args.append(host_str)
- scp_args.append(self.args.dest)
-
- print "Running: %s\n" % ' '.join(scp_args)
-
- os.execve('/usr/bin/scp', scp_args, os.environ)
- except:
- print traceback.print_exc()
- print sys.exc_info()
-
-
-if __name__ == '__main__':
- oscp = Oscp()
-
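
The removed `oscp` above derives the user, host, environment, and remote path from a single `user@host.env:/path` argument in `process_host`. A small standalone sketch of that parsing, reusing the same regular expressions with an illustrative spec; it assumes a well-formed `host:path` argument and splits on the first dot, which slightly simplifies the original:

import re

def parse_host_spec(spec):
    # Split 'user@host.env:/path' into (user, host, env, path).
    user, env = '', None
    if '@' in spec:
        user, host, path = re.search(r"(.*@)(.*)(:.*$)", spec).groups()
    else:
        host, path = re.search(r"(.*)(:.*$)", spec).groups()
    if '.' in host:
        host, env = host.split('.', 1)
    return user, host, env, path

print(parse_host_spec('irb2@ex-srv-node1.stg:/home/irb2/foo.txt'))
# -> ('irb2@', 'ex-srv-node1', 'stg', ':/home/irb2/foo.txt')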
diff --git a/bin/ossh b/bin/ossh
deleted file mode 100755
index 2ed033305..000000000
--- a/bin/ossh
+++ /dev/null
@@ -1,221 +0,0 @@
-#!/usr/bin/env python2
-# vim: expandtab:tabstop=4:shiftwidth=4
-
-import argparse
-import traceback
-import sys
-import os
-import re
-import ConfigParser
-
-from openshift_ansible import awsutil
-
-CONFIG_MAIN_SECTION = 'main'
-
-class Ossh(object):
- def __init__(self):
- self.file_path = os.path.join(os.path.dirname(os.path.realpath(__file__)))
-
- # Default the config path to /etc
- self.config_path = os.path.join(os.path.sep, 'etc', \
- 'openshift_ansible', \
- 'openshift_ansible.conf')
-
- self.parse_cli_args()
- self.parse_config_file()
-
- self.aws = awsutil.AwsUtil()
-
- if self.args.refresh_cache:
- self.get_hosts(True)
- else:
- self.get_hosts()
-
- # parse host and user
- self.process_host()
-
- if self.args.host == '' and not self.args.list:
- self.parser.print_help()
- return
-
- if self.args.debug:
- print self.args
-
- # perform the SSH
- if self.args.list:
- self.list_hosts()
- else:
- self.ssh()
-
- def parse_config_file(self):
- if os.path.isfile(self.config_path):
- config = ConfigParser.ConfigParser()
- config.read(self.config_path)
-
- def parse_cli_args(self):
- parser = argparse.ArgumentParser(description='OpenShift Online SSH Tool.')
- parser.add_argument('-e', '--env', action="store",
- help="Which environment to search for the host ")
- parser.add_argument('-d', '--debug', default=False,
- action="store_true", help="debug mode")
- parser.add_argument('-v', '--verbose', default=False,
- action="store_true", help="Verbose?")
- parser.add_argument('--refresh-cache', default=False,
- action="store_true", help="Force a refresh on the host cache.")
- parser.add_argument('--list', default=False,
- action="store_true", help="list out hosts")
- parser.add_argument('-c', '--command', action='store',
- help='Command to run on remote host')
- parser.add_argument('-l', '--login_name', action='store',
- help='User in which to ssh as')
-
- parser.add_argument('-o', '--ssh_opts', action='store',
- help='options to pass to SSH.\n \
- "-oForwardX11=yes,TCPKeepAlive=yes"')
- parser.add_argument('host', nargs='?', default='')
-
- self.args = parser.parse_args()
- self.parser = parser
-
-
- def process_host(self):
- '''Determine host name and user name for SSH.
- '''
- self.env = None
- self.user = None
-
- re_env = re.compile("\.(" + "|".join(self.host_inventory.keys()) + ")")
- search = re_env.search(self.args.host)
- if self.args.env:
- self.env = self.args.env
- elif search:
- # take the first?
- self.env = search.groups()[0]
-
- # remove env from hostname command line arg if found
- if search:
- self.args.host = re_env.split(self.args.host)[0]
-
- # parse username if passed
- if '@' in self.args.host:
- self.user, self.host = self.args.host.split('@')
- else:
- self.host = self.args.host
- if self.args.login_name:
- self.user = self.args.login_name
-
- def get_hosts(self, refresh_cache=False):
- '''Query our host inventory and return a dict where the format
- equals:
-
- dict['servername'] = dns_name
- '''
- if refresh_cache:
- self.host_inventory = self.aws.build_host_dict_by_env(['--refresh-cache'])
- else:
- self.host_inventory = self.aws.build_host_dict_by_env()
-
- def select_host(self):
- '''select host attempts to match the host specified
- on the command line with a list of hosts.
- '''
- results = []
- for env in self.host_inventory.keys():
- for hostname, server_info in self.host_inventory[env].items():
- if hostname.split(':')[0] == self.host:
- results.append((hostname, server_info))
-
- # attempt to select the correct environment if specified
- if self.env:
- results = filter(lambda result: result[1]['ec2_tag_environment'] == self.env, results)
-
- if results:
- return results
- else:
- print "Could not find specified host: %s." % self.host
-
- # default - no results found.
- return None
-
- def list_hosts(self, limit=None):
- '''Function to print out the host inventory.
-
- Takes a single parameter to limit the number of hosts printed.
- '''
-
- if self.env:
- results = self.select_host()
- if len(results) == 1:
- hostname, server_info = results[0]
- sorted_keys = server_info.keys()
- sorted_keys.sort()
- for key in sorted_keys:
- print '{0:<35} {1}'.format(key, server_info[key])
- else:
- for host_id, server_info in results[:limit]:
- name = server_info['ec2_tag_Name']
- ec2_id = server_info['ec2_id']
- ip = server_info['ec2_ip_address']
- print '{ec2_tag_Name:<35} {ec2_tag_environment:<8} {ec2_id:<15} {ec2_ip_address}'.format(**server_info)
-
- if limit:
- print
- print 'Showing only the first %d results...' % limit
- print
-
- else:
- for env, host_ids in self.host_inventory.items():
- for host_id, server_info in host_ids.items():
- name = server_info['ec2_tag_Name']
- ec2_id = server_info['ec2_id']
- ip = server_info['ec2_ip_address']
- print '{ec2_tag_Name:<35} {ec2_tag_environment:<5} {ec2_id:<15} {ec2_ip_address}'.format(**server_info)
-
- def ssh(self):
- '''SSH to a specified host
- '''
- try:
- # shell args start with the program name in position 1
- ssh_args = ['/usr/bin/ssh']
-
- if self.user:
- ssh_args.append('-l%s' % self.user)
-
- if self.args.verbose:
- ssh_args.append('-vvv')
-
- if self.args.ssh_opts:
- for arg in self.args.ssh_opts.split(","):
- ssh_args.append("-o%s" % arg)
-
- results = self.select_host()
- if not results:
- return # early exit, no results
-
- if len(results) > 1:
- print "Multiple results found for %s." % self.host
- for result in results:
- print "{ec2_tag_Name:<35} {ec2_tag_environment:<5} {ec2_id:<10}".format(**result[1])
- return # early exit, too many results
-
- # Assume we have one and only one.
- hostname, server_info = results[0]
- dns = server_info['ec2_public_dns_name']
-
- ssh_args.append(dns)
-
- #last argument
- if self.args.command:
- ssh_args.append("%s" % self.args.command)
-
- print "Running: %s\n" % ' '.join(ssh_args)
-
- os.execve('/usr/bin/ssh', ssh_args, os.environ)
- except:
- print traceback.print_exc()
- print sys.exc_info()
-
-
-if __name__ == '__main__':
- ossh = Ossh()
-
diff --git a/bin/ossh_bash_completion b/bin/ossh_bash_completion
deleted file mode 100755
index 1467de858..000000000
--- a/bin/ossh_bash_completion
+++ /dev/null
@@ -1,39 +0,0 @@
-__ossh_known_hosts(){
- if [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
- /usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items() if all(k in host for k in ("ec2_tag_Name", "ec2_tag_environment"))])'
-
- fi
-}
-
-_ossh()
-{
- local cur prev known_hosts
- COMPREPLY=()
- cur="${COMP_WORDS[COMP_CWORD]}"
- prev="${COMP_WORDS[COMP_CWORD-1]}"
- known_hosts="$(__ossh_known_hosts)"
- COMPREPLY=( $(compgen -W "${known_hosts}" -- ${cur}))
-
- return 0
-}
-complete -F _ossh ossh oscp
-
-__opssh_known_hosts(){
- if [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
- /usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache")).read()); print "\n".join(["%s" % (host["ec2_tag_host-type"]) for dns, host in z["_meta"]["hostvars"].items() if "ec2_tag_host-type" in host])'
- fi
-}
-
-_opssh()
-{
- local cur prev known_hosts
- COMPREPLY=()
- cur="${COMP_WORDS[COMP_CWORD]}"
- prev="${COMP_WORDS[COMP_CWORD-1]}"
- known_hosts="$(__opssh_known_hosts)"
- COMPREPLY=( $(compgen -W "${known_hosts}" -- ${cur}))
-
- return 0
-}
-complete -F _opssh opssh
-
diff --git a/bin/ossh_zsh_completion b/bin/ossh_zsh_completion
deleted file mode 100644
index 6ab930dc4..000000000
--- a/bin/ossh_zsh_completion
+++ /dev/null
@@ -1,24 +0,0 @@
-#compdef ossh oscp
-
-_ossh_known_hosts(){
- if [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
- print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items()])')
- fi
-
-}
-_ossh(){
- local curcontext="$curcontext" state line
- typeset -A opt_args
-
-#_arguments "*:Hosts:_ossh_known_hosts"
- _arguments -s : \
- "*:hosts:->hosts"
-
- case "$state" in
- hosts)
- _values 'hosts' $(_ossh_known_hosts)
- ;;
- esac
-
-}
-_ossh "$@"
diff --git a/bin/zsh_functions/_ossh b/bin/zsh_functions/_ossh
deleted file mode 100644
index 7c6cb7b0b..000000000
--- a/bin/zsh_functions/_ossh
+++ /dev/null
@@ -1,49 +0,0 @@
-#compdef ossh oscp
-
-_ossh_known_hosts(){
- if [[ -f ~/.ansible/tmp/multi_ec2_inventory.cache ]]; then
- print $(/usr/bin/python -c 'import json,os; z = json.loads(open("%s"%os.path.expanduser("~/.ansible/tmp/multi_ec2_inventory.cache")).read()); print "\n".join(["%s.%s" % (host["ec2_tag_Name"],host["ec2_tag_environment"]) for dns, host in z["_meta"]["hostvars"].items()])')
- fi
-}
-
-_ossh(){
- local curcontext="$curcontext" state line
- typeset -A opt_args
-
- common_arguments=(
- '(- *)'{-h,--help}'[show help]' \
- {-v,--verbose}'[enable verbose]' \
- {-d,--debug}'[debug mode]' \
- {-l,--login_name}+'[login name]:login_name' \
- {-c,--command}+'[command to run on remote host]:command' \
- {-o,--ssh_opts}+'[SSH Options to pass to SSH]:ssh options' \
{-e,--env}+'[environment to use]:environment:->env' \
- '--list[list out hosts]' \
- ':OP Hosts:->oo_hosts'
- )
-
- case "$service" in
- ossh)
- _arguments -C -s \
- "$common_arguments[@]" \
- ;;
-
- oscp)
- _arguments -C -s \
- "$common_arguments[@]" \
- {-r,--recurse}'[Recursive copy]' \
- ':file:_files'
- ;;
- esac
-
- case "$state" in
- oo_hosts)
- _values 'oo_hosts' $(_ossh_known_hosts)
- ;;
- env)
- _values 'environment' ops int stg prod
- ;;
- esac
-}
-
-_ossh "$@"