mirror of https://github.com/openshift/openshift-ansible.git synced 2026-02-05 06:46:04 +01:00

Remove atomic-openshift-utils

This commit is contained in:
Scott Dodson
2017-10-13 13:32:24 -04:00
parent 615d71a6c9
commit 81edec6de1
32 changed files with 13 additions and 4646 deletions

View File

@@ -17,10 +17,9 @@ If you're operating from a **git clone**:
 * The output of `git describe`
 
-If you're running from playbooks installed via RPM or
-`atomic-openshift-utils`
+If you're running from playbooks installed via RPM
 
-* The output of `rpm -q atomic-openshift-utils openshift-ansible`
+* The output of `rpm -q openshift-ansible`
 
 Place the output between the code block below:

View File

@@ -5,7 +5,7 @@ MAINTAINER OpenShift Team <dev@lists.openshift.redhat.com>
 USER root
 
 # Playbooks, roles, and their dependencies are installed from packages.
-RUN INSTALL_PKGS="atomic-openshift-utils atomic-openshift-clients python-boto python2-boto3 python2-crypto openssl java-1.8.0-openjdk-headless httpd-tools google-cloud-sdk" \
+RUN INSTALL_PKGS="openshift-ansible atomic-openshift-clients python-boto python2-boto3 python2-crypto openssl java-1.8.0-openjdk-headless httpd-tools google-cloud-sdk" \
  && yum repolist > /dev/null \
  && yum-config-manager --enable rhel-7-server-ose-3.7-rpms \
  && yum-config-manager --enable rhel-7-server-rh-common-rpms \
@@ -30,7 +30,7 @@ LABEL name="openshift3/ose-ansible" \
 ENV USER_UID=1001 \
     HOME=/opt/app-root/src \
     WORK_DIR=/usr/share/ansible/openshift-ansible \
-    ANSIBLE_CONFIG=/usr/share/atomic-openshift-utils/ansible.cfg \
+    ANSIBLE_CONFIG=/usr/share/ansible/openshift-ansible/ansible.cfg \
     OPTS="-v"
 
 # Add image scripts and files for running as a system container

View File

@@ -6,7 +6,7 @@
         "VAR_LOG_OPENSHIFT_LOG": "/var/log/ansible.log",
         "PLAYBOOK_FILE": "/usr/share/ansible/openshift-ansible/playbooks/deploy_cluster.yml",
         "HOME_ROOT": "/root",
-        "ANSIBLE_CONFIG": "/usr/share/atomic-openshift-utils/ansible.cfg",
+        "ANSIBLE_CONFIG": "/usr/share/ansible/openshift-ansible/ansible.cfg",
         "INVENTORY_FILE": "/dev/null"
     }
 }
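
Both hunks above retarget `ANSIBLE_CONFIG` from the old atomic-openshift-utils location to the openshift-ansible one. As a reminder of why the variable matters, here is a small sketch of Ansible's documented config-file lookup order (paths illustrative; the function is hypothetical, not part of this repo):

```
import os

def resolve_ansible_cfg():
    """Mirror Ansible's documented ansible.cfg lookup order."""
    candidates = [
        os.environ.get('ANSIBLE_CONFIG'),            # 1. explicit override
        os.path.join(os.getcwd(), 'ansible.cfg'),    # 2. current directory
        os.path.expanduser('~/.ansible.cfg'),        # 3. home directory
        '/etc/ansible/ansible.cfg',                  # 4. system default
    ]
    for path in candidates:
        if path and os.path.isfile(path):
            return path
    return None

# With the image's ENV in effect, the first candidate wins:
os.environ['ANSIBLE_CONFIG'] = '/usr/share/ansible/openshift-ansible/ansible.cfg'
print(resolve_ansible_cfg())
```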

View File

@@ -24,6 +24,7 @@ Requires: tar
 Requires: %{name}-docs = %{version}-%{release}
 Requires: %{name}-playbooks = %{version}-%{release}
 Requires: %{name}-roles = %{version}-%{release}
+Obsoletes: atomic-openshift-utils <= 3.10
 Requires: java-1.8.0-openjdk-headless
 Requires: httpd-tools
 Requires: libselinux-python
@@ -42,16 +43,13 @@ for Openshift and Atomic Enterprise.
 %build
-# atomic-openshift-utils install
-pushd utils
-%{__python} setup.py build
-popd
 
 %install
 # Base openshift-ansible install
 mkdir -p %{buildroot}%{_datadir}/%{name}
 mkdir -p %{buildroot}%{_datadir}/ansible/%{name}/inventory
 cp -rp inventory/dynamic %{buildroot}%{_datadir}/ansible/%{name}/inventory
+cp etc/ansible.cfg %{buildroot}%{_datadir}/ansible/%{name}/ansible.cfg
+cp etc/ansible-quiet.cfg %{buildroot}%{_datadir}/ansible/%{name}/ansible-quiet.cfg
 
 # openshift-ansible-bin install
 mkdir -p %{buildroot}%{_bindir}
@@ -83,18 +81,6 @@ rm -rf %{buildroot}%{_datadir}/ansible/%{name}/roles/contiv/*
 # touch a file in contiv so that it can be added to SCM's
 touch %{buildroot}%{_datadir}/ansible/%{name}/roles/contiv/.empty_dir
 
-# atomic-openshift-utils install
-pushd utils
-%{__python} setup.py install --skip-build --root %{buildroot}
-# Remove this line once the name change has happened
-mv -f %{buildroot}%{_bindir}/oo-install %{buildroot}%{_bindir}/atomic-openshift-installer
-mkdir -p %{buildroot}%{_datadir}/atomic-openshift-utils/
-cp etc/ansible.cfg %{buildroot}%{_datadir}/atomic-openshift-utils/ansible.cfg
-mkdir -p %{buildroot}%{_mandir}/man1/
-cp -v docs/man/man1/atomic-openshift-installer.1 %{buildroot}%{_mandir}/man1/
-cp etc/ansible-quiet.cfg %{buildroot}%{_datadir}/atomic-openshift-utils/ansible-quiet.cfg
-popd
 
 # Base openshift-ansible files
 %files
 %doc README*
@@ -102,6 +88,8 @@ popd
 %dir %{_datadir}/ansible/%{name}
 %{_datadir}/ansible/%{name}/inventory/dynamic
 %ghost %{_datadir}/ansible/%{name}/playbooks/common/openshift-master/library.rpmmoved
+%{_datadir}/ansible/%{name}/ansible.cfg
+%{_datadir}/ansible/%{name}/ansible-quiet.cfg
 
 # ----------------------------------------------------------------------------------
 # openshift-ansible-docs subpackage
@@ -175,30 +163,6 @@ BuildArch: noarch
 %files roles
 %{_datadir}/ansible/%{name}/roles
 
-# ----------------------------------------------------------------------------------
-# atomic-openshift-utils subpackage
-# ----------------------------------------------------------------------------------
-%package -n atomic-openshift-utils
-Summary: Atomic OpenShift Utilities
-BuildRequires: python-setuptools
-Requires: %{name}-playbooks = %{version}-%{release}
-Requires: python-click
-Requires: python-setuptools
-Requires: PyYAML
-BuildArch: noarch
-
-%description -n atomic-openshift-utils
-Atomic OpenShift Utilities includes
- - atomic-openshift-installer
- - other utilities
-
-%files -n atomic-openshift-utils
-%{python_sitelib}/ooinstall*
-%{_bindir}/atomic-openshift-installer
-%{_datadir}/atomic-openshift-utils/ansible.cfg
-%{_mandir}/man1/*
-%{_datadir}/atomic-openshift-utils/ansible-quiet.cfg
-
 %changelog

View File

@@ -5,4 +5,4 @@
 universal=1
 
 [yamllint]
-excludes=.tox,utils,files
+excludes=.tox,files

View File

@@ -179,7 +179,7 @@ class OpenShiftAnsiblePylint(PylintCommand):
     # pylint: disable=no-self-use
     def find_all_modules(self):
         ''' find all python files to test '''
-        exclude_dirs = ('.tox', 'utils', 'test', 'tests', 'git')
+        exclude_dirs = ('.tox', 'test', 'tests', 'git')
         modules = []
         for match in find_files(os.getcwd(), exclude_dirs, None, r'\.py$'):
             package = os.path.basename(match).replace('.py', '')
@@ -222,8 +222,7 @@ class OpenShiftAnsibleGenerateValidation(Command):
         generate_files = find_files('roles',
                                     ['inventory',
                                      'test',
-                                     'playbooks',
-                                     'utils'],
+                                     'playbooks'],
                                     None,
                                     'generate.py$')

utils/.gitignore vendored
View File

@@ -1,48 +0,0 @@
package/
# Backup files
*.~
# Byte-compiled / optimized / DLL files
__pycache__/
*.py[cod]
# C extensions
*.so
# Distribution / packaging
bin/
build/
develop-eggs/
dist/
eggs/
lib/
lib64/
parts/
sdist/
var/
*.egg-info/
.installed.cfg
*.egg
MANIFEST
# Installer logs
pip-log.txt
pip-delete-this-directory.txt
# Unit test / coverage reports
.tox/
.coverage
.cache
.noseids
nosetests.xml
coverage.xml
# Translations
*.mo
# Sphinx documentation
docs/_build/
oo-install
oo-installenv
cover

View File

@@ -1,18 +0,0 @@
# Running Tests

All tests can be run by running `tox`. See [running tests](../CONTRIBUTING.md#running-tests) for more information.

# Running From Source

You will need to set up a **virtualenv** to run from source:

    $ virtualenv oo-install
    $ source oo-install/bin/activate
    $ python setup.py develop

The virtualenv `bin` directory should now be at the start of your
`$PATH`, and `oo-install` is ready to use from your shell.

You can exit the virtualenv with:

    $ deactivate

View File

@@ -1,84 +0,0 @@
# oo-install Supported Configuration File
Upon completion oo-install will write out a configuration file representing the settings that were gathered and used. This configuration file, or one crafted by hand, can be used to run or re-run the installer and add additional hosts, upgrade, or re-install.
The default location this config file will be written to is ~/.config/openshift/installer.cfg.yml.
## Example
```
version: v2
variant: openshift-enterprise
variant_version: 3.3
deployment:
  ansible_ssh_user: root
  hosts:
  - connect_to: 24.222.0.1
    ip: 10.0.0.1
    hostname: master-private.example.com
    public_ip: 24.222.0.1
    public_hostname: master.example.com
    roles:
    - master
    - node
    containerized: true
  - connect_to: 10.0.0.2
    ip: 10.0.0.2
    hostname: node1-private.example.com
    public_ip: 24.222.0.2
    public_hostname: node1.example.com
    roles:
    - node
  - connect_to: 10.0.0.3
    ip: 10.0.0.3
    hostname: node2-private.example.com
    public_ip: 24.222.0.3
    public_hostname: node2.example.com
    roles:
    - node
  roles:
    master:
    node:
```
## Primary Settings
### version
Indicates the version of configuration this file was written with. The current implementation is v2 (the example above), and the installer upgrades v1 files automatically.
### variant
The OpenShift variant to install. Currently valid options are:
* openshift-enterprise
### variant_version (optional)
Default: Latest version for your chosen variant.
A version which must be valid for your selected variant. If not specified the latest will be assumed.
Examples: 3.0, 3.1, etc.
### hosts
This section defines a list of the hosts you wish to install the OpenShift master/node service on.
*ip* or *hostname* must be specified so the installer can connect to the system to gather facts before proceeding with the install.
If *public_ip* or *public_hostname* is not specified, this information will be gathered from the facts and the user will be asked to confirm it in an editor. An unattended install will instead error out: you must provide complete host records for an unattended install.
*master* and *node* determine the type of services that will be installed. One of these must be set to true for the configuration file to be considered valid.
*containerized* indicates you want to run OpenShift services in a container on this host.
### ansible_ssh_user
Default: root
Defines the user ansible will use to ssh to remote systems for gathering facts and the installation.
### ansible_log_path
Default: /tmp/ansible.log
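
As a quick sanity check, the example file above parses with plain PyYAML. A minimal sketch (file path assumed; the installer's own OOConfig class does much more validation) that lists each host and its roles:

```
import yaml

# Path assumed; the installer's default is ~/.config/openshift/installer.cfg.yml.
with open('installer.cfg.yml') as cfg_file:
    cfg = yaml.safe_load(cfg_file)

print(cfg['variant'], cfg.get('variant_version'))
for host in cfg['deployment']['hosts']:
    # e.g. "24.222.0.1 ['master', 'node']"
    print(host['connect_to'], host.get('roles', []))
```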

View File

@@ -1,200 +0,0 @@
'\" t
.\" Title: atomic-openshift-installer
.\" Author: [see the "AUTHOR" section]
.\" Generator: DocBook XSL Stylesheets v1.78.1 <http://docbook.sf.net/>
.\" Date: 12/28/2016
.\" Manual: atomic-openshift-installer
.\" Source: atomic-openshift-utils 1.4
.\" Language: English
.\"
.TH "ATOMIC\-OPENSHIFT\-I" "1" "12/28/2016" "atomic\-openshift\-utils 1\&.4" "atomic\-openshift\-installer"
.\" -----------------------------------------------------------------
.\" * Define some portability stuff
.\" -----------------------------------------------------------------
.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.\" http://bugs.debian.org/507673
.\" http://lists.gnu.org/archive/html/groff/2009-02/msg00013.html
.\" ~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~~
.ie \n(.g .ds Aq \(aq
.el .ds Aq '
.\" -----------------------------------------------------------------
.\" * set default formatting
.\" -----------------------------------------------------------------
.\" disable hyphenation
.nh
.\" disable justification (adjust text to left margin only)
.ad l
.\" -----------------------------------------------------------------
.\" * MAIN CONTENT STARTS HERE *
.\" -----------------------------------------------------------------
.SH "NAME"
atomic-openshift-installer \- Interactive OpenShift Container Platform (OCP) installer
.SH "SYNOPSIS"
.sp
atomic\-openshift\-installer [OPTIONS] COMMAND [OPTS]
.SH "DESCRIPTION"
.sp
\fBatomic\-openshift\-installer\fR makes the process for installing OCP easier by interactively gathering the data needed to run on each host\&. It can also be run in unattended mode if provided with a configuration file\&.
.SH "OPTIONS"
.sp
The following options are common to all commands\&.
.PP
\fB\-u\fR, \fB\-\-unattended\fR
.RS 4
Run installer in
\fBunattended\fR
mode\&. You will not be prompted to answer any questions\&.
.RE
.PP
\fB\-c\fR, \fB\-\-configuration\fR \fIPATH\fR
.RS 4
Provide an alternate
\fIPATH\fR
to an
\fIinstaller\&.cfg\&.yml\fR
file\&.
.RE
.PP
\fB\-a\fR \fIDIRECTORY\fR, \fB\-\-ansible\-playbook\-directory\fR \fIDIRECTORY\fR
.RS 4
Manually set the
\fIDIRECTORY\fR
in which to look for Ansible playbooks\&.
.RE
.PP
\fB\-\-ansible\-log\-path\fR \fIPATH\fR
.RS 4
Specify the
\fIPATH\fR
of the directory in which to save Ansible logs\&.
.RE
.PP
\fB\-v\fR, \fB\-\-verbose\fR
.RS 4
Run the installer with more verbosity\&.
.RE
.PP
\fB\-d\fR, \fB\-\-debug\fR
.RS 4
Enable installer debugging\&. Logs are saved in
\fI/tmp/installer\&.txt\fR\&.
.RE
.PP
\fB\-h\fR, \fB\-\-help\fR
.RS 4
Show the usage help and exit\&.
.RE
.SH "COMMANDS"
.sp
\fBatomic\-openshift\-installer\fR has four modes of operation:
.sp
.RS 4
.ie n \{\
\h'-04'\(bu\h'+03'\c
.\}
.el \{\
.sp -1
.IP \(bu 2.3
.\}
\fBinstall\fR
.RE
.sp
.RS 4
.ie n \{\
\h'-04'\(bu\h'+03'\c
.\}
.el \{\
.sp -1
.IP \(bu 2.3
.\}
\fBuninstall\fR
.RE
.sp
.RS 4
.ie n \{\
\h'-04'\(bu\h'+03'\c
.\}
.el \{\
.sp -1
.IP \(bu 2.3
.\}
\fBupgrade\fR
.RE
.sp
.RS 4
.ie n \{\
\h'-04'\(bu\h'+03'\c
.\}
.el \{\
.sp -1
.IP \(bu 2.3
.\}
\fBscaleup\fR
.RE
.sp
The options specific to each command are described in the following sections\&.
.SH "INSTALL"
.sp
The \fBinstall\fR command will guide you through steps required to install an OCP cluster\&. After all of the required information has been collected (target hosts, storage options, high\-availability), the installation will begin\&.
.PP
\fB\-f\fR, \fB\-\-force\fR
.RS 4
Forces an installation\&. This means that hosts with existing installations will be reinstalled if required\&.
.RE
.PP
\fB\-\-gen\-inventory\fR
.RS 4
Generate an Ansible inventory file and exit\&. The default location for the inventory file is
\fI~/\&.config/openshift/hosts\fR\&.
.RE
.SH "UNINSTALL"
.sp
The \fBuninstall\fR command will uninstall OCP from your target hosts\&. This command has no additional options\&.
.SH "UPGRADE"
.sp
The \fBupgrade\fR command will upgrade a cluster of hosts to a newer version of OCP\&.
.PP
\fB\-l\fR, \fB\-\-latest\-minor\fR
.RS 4
Upgrade to the latest minor version\&. For example, if you are running version
\fB3\&.2\&.1\fR
then this could upgrade you to
\fB3\&.2\&.2\fR\&.
.RE
.PP
\fB\-n\fR, \fB\-\-next\-major\fR
.RS 4
Upgrade to the latest major version\&. For example, if you are running version
\fB3\&.2\fR
then this could upgrade you to
\fB3\&.3\fR\&.
.RE
.SH "SCALEUP"
.sp
The \fBscaleup\fR command is used to add new nodes to an existing cluster\&. This command has no additional options\&.
.SH "FILES"
.sp
\fB~/\&.config/openshift/installer\&.cfg\&.yml\fR \(em Installer configuration file\&. Can be used to generate an inventory later or start an unattended installation\&.
.sp
\fB~/\&.config/openshift/hosts\fR \(em Generated Ansible inventory file\&. Used to run the Ansible playbooks for install, uninstall, and upgrades\&.
.sp
\fB/tmp/ansible\&.log\fR \(em The default location of the ansible log file\&.
.sp
\fB/tmp/installer\&.txt\fR \(em The location of the log file for debugging the installer\&.
.SH "AUTHOR"
.sp
Red Hat OpenShift Productization team
.sp
For a complete list of contributors, please visit the GitHub charts page\&.
.SH "COPYRIGHT"
.sp
Copyright \(co 2016 Red Hat, Inc\&.
.sp
\fBatomic\-openshift\-installer\fR is released under the terms of the ASL 2\&.0 license\&.
.SH "SEE ALSO"
.sp
\fBansible\fR(1), \fBansible\-playbook\fR(1)
.sp
\fBThe openshift\-ansible GitHub Project\fR \(em https://github\&.com/openshift/openshift\-ansible/
.sp
\fBThe atomic\-openshift\-installer Documentation\fR \(em https://docs\&.openshift\&.com/container\-platform/3\&.3/install_config/install/quick_install\&.html

View File

@@ -1,173 +0,0 @@
atomic-openshift-installer(1)
=============================
:man source: atomic-openshift-utils
:man version: %VERSION%
:man manual: atomic-openshift-installer
NAME
----
atomic-openshift-installer - Interactive OpenShift Container Platform (OCP) installer
SYNOPSIS
--------
atomic-openshift-installer [OPTIONS] COMMAND [OPTS]
DESCRIPTION
-----------
**atomic-openshift-installer** makes the process for installing OCP
easier by interactively gathering the data needed to run on each
host. It can also be run in unattended mode if provided with a
configuration file.
OPTIONS
-------
The following options are common to all commands.
*-u*, *--unattended*::
Run installer in **unattended** mode. You will not be prompted to
answer any questions.
*-c*, *--configuration* 'PATH'::
Provide an alternate 'PATH' to an 'installer.cfg.yml' file.
*-a* 'DIRECTORY', *--ansible-playbook-directory* 'DIRECTORY'::
Manually set the 'DIRECTORY' in which to look for Ansible playbooks.
*--ansible-log-path* 'PATH'::
Specify the 'PATH' of the directory in which to save Ansible logs.
*-v*, *--verbose*::
Run the installer with more verbosity.
*-d*, *--debug*::
Enable installer debugging. Logs are saved in '/tmp/installer.txt'.
*-h*, *--help*::
Show the usage help and exit.
COMMANDS
--------
**atomic-openshift-installer** has four modes of operation:
* **install**
* **uninstall**
* **upgrade**
* **scaleup**
The options specific to each command are described in the following
sections.
INSTALL
-------
The **install** command will guide you through steps required to
install an OCP cluster. After all of the required information has been
collected (target hosts, storage options, high-availability), the
installation will begin.
*-f*, *--force*::
Forces an installation. This means that hosts with existing
installations will be reinstalled if required.
*--gen-inventory*::
Generate an Ansible inventory file and exit. The default location for
the inventory file is '~/.config/openshift/hosts'.
UNINSTALL
---------
The **uninstall** command will uninstall OCP from your target
hosts. This command has no additional options.
UPGRADE
-------
The **upgrade** command will upgrade a cluster of hosts to a newer
version of OCP.
*-l*, *--latest-minor*::
Upgrade to the latest minor version. For example, if you are running
version **3.2.1** then this could upgrade you to **3.2.2**.
*-n*, *--next-major*::
Upgrade to the latest major version. For example, if you are running
version **3.2** then this could upgrade you to **3.3**.
SCALEUP
-------
The **scaleup** command is used to add new nodes to an existing cluster.
This command has no additional options.
FILES
-----
*~/.config/openshift/installer.cfg.yml* -- Installer configuration
file. Can be used to generate an inventory later or start an
unattended installation.
*~/.config/openshift/hosts* -- Generated Ansible inventory file. Used
to run the Ansible playbooks for install, uninstall, and upgrades.
*/tmp/ansible.log* -- The default location of the ansible log file.
*/tmp/installer.txt* -- The location of the log file for debugging the
installer.
AUTHOR
------
Red Hat OpenShift Productization team
For a complete list of contributors, please visit the GitHub charts
page.
COPYRIGHT
---------
Copyright © 2016 Red Hat, Inc.
**atomic-openshift-installer** is released under the terms of the ASL
2.0 license.
SEE ALSO
--------
*ansible*(1), *ansible-playbook*(1)
*The openshift-ansible GitHub Project* -- <https://github.com/openshift/openshift-ansible/>
*The atomic-openshift-installer Documentation* -- <https://docs.openshift.com/container-platform/3.3/install_config/install/quick_install.html>

View File

@@ -1,5 +0,0 @@
[bdist_wheel]
# This flag says that the code is written to work on both Python 2 and Python
# 3. If at all possible, it is good practice to do this. If you cannot, you
# will need to generate wheels for each Python version that you support.
universal=1

View File

@@ -1,65 +0,0 @@
"""A setuptools based setup module.
"""
# Always prefer setuptools over distutils
from setuptools import setup
setup(
name='ooinstall',
# Versions should comply with PEP440. For a discussion on single-sourcing
# the version across setup.py and the project code, see
# https://packaging.python.org/en/latest/single_source_version.html
version="3.0.0",
description="Ansible wrapper for OpenShift Enterprise 3 installation.",
# The project's main homepage.
url="https://github.com/openshift/openshift-ansible",
# Author details
author="openshift@redhat.com",
author_email="OpenShift",
# Choose your license
license="Apache 2.0",
# See https://pypi.python.org/pypi?%3Aaction=list_classifiers
classifiers=[
'Development Status :: 4 - Beta',
'License :: OSI Approved :: Apache Software License',
'Programming Language :: Python :: 2.7',
'Topic :: Utilities',
],
# What does your project relate to?
keywords='oo-install setuptools development',
# You can just specify the packages manually here if your project is
# simple. Or you can use find_packages().
packages=['ooinstall'],
package_dir={'': 'src'},
# List run-time dependencies here. These will be installed by pip when
# your project is installed. For an analysis of "install_requires" vs pip's
# requirements files see:
# https://packaging.python.org/en/latest/requirements.html
install_requires=['click', 'PyYAML', 'ansible'],
# If there are data files included in your packages that need to be
# installed, specify them here. If using Python 2.6 or less, then these
# have to be included in MANIFEST.in as well.
package_data={
'ooinstall': ['ansible.cfg', 'ansible-quiet.cfg', 'ansible_plugins/*'],
},
# To provide executable scripts, use entry points in preference to the
# "scripts" keyword. Entry points provide cross-platform support and allow
# pip to create the appropriate form of executable for the target platform.
entry_points={
'console_scripts': [
'oo-install=ooinstall.cli_installer:cli',
],
},
)
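
The `console_scripts` entry point above is what produced the `oo-install` executable (renamed to `atomic-openshift-installer` by the spec file earlier in this commit). On install, pip/setuptools generates a small wrapper of roughly this shape; a sketch, not the exact generated file:

```
#!/usr/bin/python
# Approximate shape of the generated 'oo-install' wrapper: import the
# callable named in entry_points and hand control over to it.
import sys
from ooinstall.cli_installer import cli

if __name__ == '__main__':
    sys.exit(cli())
```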

View File

@@ -1,93 +0,0 @@
#!/bin/sh

# Grab command-line arguments
cmdlnargs="$@"

: ${OO_INSTALL_KEEP_ASSETS:="false"}
: ${OO_INSTALL_CONTEXT:="INSTALLCONTEXT"}
: ${TMPDIR:=/tmp}
: ${OO_INSTALL_LOG:=${TMPDIR}/INSTALLPKGNAME.log}
[[ $TMPDIR != */ ]] && TMPDIR="${TMPDIR}/"

if rpm -q dnf;
then
  PKG_MGR="dnf"
else
  PKG_MGR="yum"
fi

if [ $OO_INSTALL_CONTEXT != 'origin_vm' ]
then
  clear
  echo "Checking for necessary tools..."
fi
if [ -e /etc/redhat-release ]
then
  for i in python python-virtualenv openssh-clients gcc
  do
    rpm -q $i >/dev/null 2>&1 || { echo >&2 "Missing installation dependency detected. Please run \"${PKG_MGR} install ${i}\"."; exit 1; }
  done
fi
for i in python virtualenv ssh gcc
do
  command -v $i >/dev/null 2>&1 || { echo >&2 "OpenShift installation requires $i on the PATH but it does not appear to be available. Correct this and rerun the installer."; exit 1; }
done

# All instances of INSTALLPKGNAME are replaced during packaging with the actual package name.
if [[ -e ./INSTALLPKGNAME.tgz ]]
then
  if [ $OO_INSTALL_CONTEXT != 'origin_vm' ]
  then
    echo "Using bundled assets."
  fi
  cp INSTALLPKGNAME.tgz ${TMPDIR}/INSTALLPKGNAME.tgz
elif [[ $OO_INSTALL_KEEP_ASSETS == 'true' && -e ${TMPDIR}/INSTALLPKGNAME.tgz ]]
then
  if [ $OO_INSTALL_CONTEXT != 'origin_vm' ]
  then
    echo "Using existing installer assets."
  fi
else
  echo "Downloading oo-install package to ${TMPDIR}INSTALLPKGNAME.tgz..."
  curl -s -o ${TMPDIR}INSTALLPKGNAME.tgz https://install.openshift.com/INSTALLVERPATHINSTALLPKGNAME.tgz
fi

if [ $OO_INSTALL_CONTEXT != 'origin_vm' ]
then
  echo "Extracting oo-install to ${TMPDIR}INSTALLPKGNAME..."
fi
tar xzf ${TMPDIR}INSTALLPKGNAME.tgz -C ${TMPDIR} 2>&1 >> $OO_INSTALL_LOG

echo "Preparing to install. This can take a minute or two..."
virtualenv ${TMPDIR}/INSTALLPKGNAME 2>&1 >> $OO_INSTALL_LOG
cd ${TMPDIR}/INSTALLPKGNAME 2>&1 >> $OO_INSTALL_LOG
source ./bin/activate 2>&1 >> $OO_INSTALL_LOG
pip install --no-index -f file:///$(readlink -f deps) ansible 2>&1 >> $OO_INSTALL_LOG

# TODO: these deps should technically be handled as part of installing ooinstall
pip install --no-index -f file:///$(readlink -f deps) click 2>&1 >> $OO_INSTALL_LOG

pip install --no-index ./src/ 2>&1 >> $OO_INSTALL_LOG
echo "Installation preparation done!" 2>&1 >> $OO_INSTALL_LOG

echo "Using `ansible --version`" 2>&1 >> $OO_INSTALL_LOG

if [ $OO_INSTALL_CONTEXT != 'origin_vm' ]
then
  echo "Starting oo-install..." 2>&1 >> $OO_INSTALL_LOG
else
  clear
fi
oo-install $cmdlnargs --ansible-playbook-directory ${TMPDIR}/INSTALLPKGNAME/openshift-ansible-*/ --ansible-log-path $OO_INSTALL_LOG

if [ $OO_INSTALL_KEEP_ASSETS == 'true' ]
then
  echo "Keeping temporary assets in ${TMPDIR}"
else
  echo "Removing temporary assets."
  rm -rf ${TMPDIR}INSTALLPKGNAME
  rm -rf ${TMPDIR}INSTALLPKGNAME.tgz
fi

echo "Please see $OO_INSTALL_LOG for full output."

exit

View File

@@ -1,22 +0,0 @@
= oo-install Portable Installer Package
This package is identical to the installer package that can be downloaded
and executed directly from https://install.openshift.com/.
NOTE: It will still be necessary for this installer to download RPMs from the
internet, unless you have already set up the necessary local repositories.
To run the installer from this package, run the following command:
$ ./LAUNCHERNAME
That command script and the packaged zip file can be burned to a CD or
written to a USB drive and used to run the oo-install utility in places
where the web-based installer is not reachable.
All of the command-line arguments supported by oo-install can be passed
to this launcher application.
For more information for Enterprise installs, refer to the OpenShift
Enterprise Administrator Guide:
https://docs.openshift.com/enterprise/latest/welcome/index.html

View File

@@ -1,13 +0,0 @@
A sample Python project
=======================
This is the description file for the project.
The file should use UTF-8 encoding and be written using ReStructured Text. It
will be used to generate the project webpage on PyPI, and should be written for
that purpose.
Typical contents for this file would include an overview of the project, basic
usage examples, etc. Generally, including the project changelog in here is not
a good idea, although a simple "What's New" section for the most recent version
may be appropriate.

View File

@@ -1,10 +0,0 @@
include DESCRIPTION.rst
# Include the test suite (FIXME: does not work yet)
# recursive-include tests *
# If using Python 2.6 or less, then have to include package data, even though
# it's already declared in setup.py
include ooinstall/*
include ansible.cfg
include ansible-quiet.cfg

View File

@@ -1 +0,0 @@
# pylint: disable=missing-docstring

View File

@@ -1,101 +0,0 @@
# TODO: Temporarily disabled due to importing old code into openshift-ansible
# repo. We will work on these over time.
# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,no-value-for-parameter

import os
import yaml
from ansible.plugins.callback import CallbackBase
from ansible.parsing.yaml.dumper import AnsibleDumper
from ansible.module_utils.six import u


# pylint: disable=super-init-not-called
class CallbackModule(CallbackBase):

    def __init__(self):
        ######################
        # This is ugly stoopid. This should be updated in the following ways:
        # 1) it should probably only be used for the
        # openshift_facts.yml playbook, so maybe there's some way to check
        # a variable that's set when that playbook is run?
        try:
            self.hosts_yaml_name = os.environ['OO_INSTALL_CALLBACK_FACTS_YAML']
        except KeyError:
            raise ValueError('The OO_INSTALL_CALLBACK_FACTS_YAML environment '
                             'variable must be set.')
        self.hosts_yaml = os.open(self.hosts_yaml_name,
                                  os.O_CREAT | os.O_WRONLY)

    def v2_on_any(self, *args, **kwargs):
        pass

    def v2_runner_on_failed(self, res, ignore_errors=False):
        pass

    # pylint: disable=protected-access
    def v2_runner_on_ok(self, res):
        abridged_result = res._result.copy()
        # Collect facts result from playbooks/byo/openshift_facts.yml
        if 'result' in abridged_result:
            facts = abridged_result['result']['ansible_facts']['openshift']
            hosts_yaml = {}
            hosts_yaml[res._host.get_name()] = facts
            to_dump = u(yaml.dump(hosts_yaml,
                                  allow_unicode=True,
                                  default_flow_style=False,
                                  Dumper=AnsibleDumper))
            os.write(self.hosts_yaml, to_dump)

    def v2_runner_on_skipped(self, res):
        pass

    def v2_runner_on_unreachable(self, res):
        pass

    def v2_runner_on_no_hosts(self, task):
        pass

    def v2_runner_on_async_poll(self, res):
        pass

    def v2_runner_on_async_ok(self, res):
        pass

    def v2_runner_on_async_failed(self, res):
        pass

    def v2_playbook_on_start(self, playbook):
        pass

    def v2_playbook_on_notify(self, res, handler):
        pass

    def v2_playbook_on_no_hosts_matched(self):
        pass

    def v2_playbook_on_no_hosts_remaining(self):
        pass

    def v2_playbook_on_task_start(self, name, is_conditional):
        pass

    # pylint: disable=too-many-arguments
    def v2_playbook_on_vars_prompt(self, varname, private=True, prompt=None,
                                   encrypt=None, confirm=False, salt_size=None, salt=None, default=None):
        pass

    def v2_playbook_on_setup(self):
        pass

    def v2_playbook_on_import_for_host(self, res, imported_file):
        pass

    def v2_playbook_on_not_import_for_host(self, res, missing_file):
        pass

    def v2_playbook_on_play_start(self, play):
        pass

    def v2_playbook_on_stats(self, stats):
        pass
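
For context, this callback only ever ran under the installer, which pointed `OO_INSTALL_CALLBACK_FACTS_YAML` at a scratch file before invoking the facts playbook and then read the YAML back (see `load_system_facts()` further down in this commit). A minimal sketch of that round trip, with hypothetical paths:

```
import os
import subprocess
import yaml

env = os.environ.copy()
env['OO_INSTALL_CALLBACK_FACTS_YAML'] = '/tmp/callback_facts.yaml'  # scratch file
env['ANSIBLE_CALLBACK_PLUGINS'] = '/path/to/ooinstall/ansible_plugins'

# The callback above writes one YAML document per run...
subprocess.call(['ansible-playbook',
                 '--inventory-file=/tmp/hosts',
                 'playbooks/byo/openshift_facts.yml'], env=env)

# ...which the installer then loads back as {hostname: {openshift facts}}.
with open(env['OO_INSTALL_CALLBACK_FACTS_YAML']) as facts_file:
    facts_by_host = yaml.safe_load(facts_file)
print(sorted(facts_by_host))
```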

File diff suppressed because it is too large

View File

@@ -1,438 +0,0 @@
# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,too-many-instance-attributes,too-few-public-methods

from __future__ import (absolute_import, print_function)

import os
import sys
import logging
import yaml

from pkg_resources import resource_filename

installer_log = logging.getLogger('installer')

CONFIG_PERSIST_SETTINGS = [
    'ansible_ssh_user',
    'ansible_callback_facts_yaml',
    'ansible_inventory_path',
    'ansible_log_path',
    'deployment',
    'version',
    'variant',
    'variant_subtype',
    'variant_version',
]

DEPLOYMENT_VARIABLES_BLACKLIST = [
    'hosts',
    'roles',
]

HOST_VARIABLES_BLACKLIST = [
    'ip',
    'public_ip',
    'hostname',
    'public_hostname',
    'node_labels',
    'containerized',
    'preconfigured',
    'schedulable',
    'other_variables',
    'roles',
]

DEFAULT_REQUIRED_FACTS = ['ip', 'public_ip', 'hostname', 'public_hostname']
PRECONFIGURED_REQUIRED_FACTS = ['hostname', 'public_hostname']


def print_read_config_error(error, path='the configuration file'):
    message = """
Error loading config. {}.

See https://docs.openshift.com/enterprise/latest/install_config/install/quick_install.html#defining-an-installation-configuration-file
for information on creating a configuration file or delete {} and re-run the installer.
"""
    print(message.format(error, path))


class OOConfigFileError(Exception):
    """The provided config file path can't be read/written
    """
    pass


class OOConfigInvalidHostError(Exception):
    """ Host in config is missing both ip and hostname. """
    pass


class Host(object):
    """ A system we will or have installed OpenShift on. """
    def __init__(self, **kwargs):
        self.ip = kwargs.get('ip', None)
        self.hostname = kwargs.get('hostname', None)
        self.public_ip = kwargs.get('public_ip', None)
        self.public_hostname = kwargs.get('public_hostname', None)
        self.connect_to = kwargs.get('connect_to', None)
        self.preconfigured = kwargs.get('preconfigured', None)
        self.schedulable = kwargs.get('schedulable', None)
        self.new_host = kwargs.get('new_host', None)
        self.containerized = kwargs.get('containerized', False)
        self.node_labels = kwargs.get('node_labels', '')

        # allowable roles: master, node, etcd, storage, master_lb
        self.roles = kwargs.get('roles', [])
        self.other_variables = kwargs.get('other_variables', {})

        if self.connect_to is None:
            raise OOConfigInvalidHostError(
                "You must specify either an ip or hostname as 'connect_to'")

    def __str__(self):
        return self.connect_to

    def __repr__(self):
        return self.connect_to

    def to_dict(self):
        """ Used when exporting to yaml. """
        d = {}

        for prop in ['ip', 'hostname', 'public_ip', 'public_hostname', 'connect_to',
                     'preconfigured', 'containerized', 'schedulable', 'roles', 'node_labels', ]:
            # If the property is defined (not None or False), export it:
            if getattr(self, prop):
                d[prop] = getattr(self, prop)
        for variable, value in self.other_variables.items():
            d[variable] = value

        return d

    def is_master(self):
        return 'master' in self.roles

    def is_node(self):
        return 'node' in self.roles

    def is_master_lb(self):
        return 'master_lb' in self.roles

    def is_storage(self):
        return 'storage' in self.roles

    def is_etcd(self):
        """ Does this host have the etcd role """
        return 'etcd' in self.roles

    def is_dedicated_node(self):
        """ Will this host be a dedicated node. (not a master) """
        return self.is_node() and not self.is_master()

    def is_schedulable_node(self, all_hosts):
        """ Will this host be a node marked as schedulable. """
        if not self.is_node():
            return False
        if not self.is_master():
            return True

        masters = [host for host in all_hosts if host.is_master()]
        nodes = [host for host in all_hosts if host.is_node()]
        if len(masters) == len(nodes):
            return True
        return False


class Role(object):
    """ A role that will be applied to a host. """
    def __init__(self, name, variables):
        self.name = name
        self.variables = variables

    def __str__(self):
        return self.name

    def __repr__(self):
        return self.name

    def to_dict(self):
        """ Used when exporting to yaml. """
        d = {}
        for prop in ['name', 'variables']:
            # If the property is defined (not None or False), export it:
            if getattr(self, prop):
                d[prop] = getattr(self, prop)
        return d


class Deployment(object):
    def __init__(self, **kwargs):
        self.hosts = kwargs.get('hosts', [])
        self.roles = kwargs.get('roles', {})
        self.variables = kwargs.get('variables', {})


class OOConfig(object):
    default_dir = os.path.normpath(
        os.environ.get('XDG_CONFIG_HOME',
                       os.environ.get('HOME', '') + '/.config/') + '/openshift/')
    default_file = '/installer.cfg.yml'

    def __init__(self, config_path):
        if config_path:
            self.config_path = os.path.normpath(config_path)
        else:
            self.config_path = os.path.normpath(self.default_dir +
                                                self.default_file)
        self.deployment = Deployment(hosts=[], roles={}, variables={})
        self.settings = {}
        self._read_config()
        self._set_defaults()

    # pylint: disable=too-many-branches
    # Lots of different checks ran in a single method, could
    # use a little refactoring-love some time
    def _read_config(self):
        installer_log.debug("Attempting to read the OO Config")
        try:
            installer_log.debug("Attempting to see if the provided config file exists: %s", self.config_path)
            if os.path.exists(self.config_path):
                installer_log.debug("We think the config file exists: %s", self.config_path)
                with open(self.config_path, 'r') as cfgfile:
                    loaded_config = yaml.safe_load(cfgfile.read())

                if 'version' not in loaded_config:
                    print_read_config_error('Legacy configuration file found', self.config_path)
                    sys.exit(0)

                if loaded_config.get('version', '') == 'v1':
                    loaded_config = self._upgrade_v1_config(loaded_config)

                try:
                    host_list = loaded_config['deployment']['hosts']
                    role_list = loaded_config['deployment']['roles']
                except KeyError as e:
                    print_read_config_error("No such key: {}".format(e), self.config_path)
                    sys.exit(0)

                for setting in CONFIG_PERSIST_SETTINGS:
                    persisted_value = loaded_config.get(setting)
                    if persisted_value is not None:
                        self.settings[setting] = str(persisted_value)
                        installer_log.debug("config: set (%s) to value (%s)", setting, persisted_value)

                # We've loaded any persisted configs, let's verify any
                # paths which are required for a correct and complete
                # install

                # - ansible_callback_facts_yaml - Settings from a
                #   previous run. If the file doesn't exist then we
                #   will just warn about it for now and recollect the
                #   facts.
                if self.settings.get('ansible_callback_facts_yaml', None) is not None:
                    if not os.path.exists(self.settings['ansible_callback_facts_yaml']):
                        # Cached callback facts file does not exist
                        installer_log.warning("The specified 'ansible_callback_facts_yaml'"
                                              "file does not exist (%s)",
                                              self.settings['ansible_callback_facts_yaml'])
                        installer_log.debug("Remote system facts will be collected again later")
                        self.settings.pop('ansible_callback_facts_yaml')

                for setting in loaded_config['deployment']:
                    try:
                        if setting not in DEPLOYMENT_VARIABLES_BLACKLIST:
                            self.deployment.variables[setting] = \
                                str(loaded_config['deployment'][setting])
                    except KeyError:
                        continue

                # Parse the hosts into DTO objects:
                for host in host_list:
                    host['other_variables'] = {}
                    for variable, value in host.items():
                        if variable not in HOST_VARIABLES_BLACKLIST:
                            host['other_variables'][variable] = value
                    self.deployment.hosts.append(Host(**host))

                # Parse the roles into Objects
                for name, variables in role_list.items():
                    self.deployment.roles.update({name: Role(name, variables)})

        except IOError as ferr:
            raise OOConfigFileError('Cannot open config file "{}": {}'.format(ferr.filename,
                                                                              ferr.strerror))
        except yaml.scanner.ScannerError:
            raise OOConfigFileError(
                'Config file "{}" is not a valid YAML document'.format(self.config_path))
        installer_log.debug("Parsed the config file")

    def _upgrade_v1_config(self, config):
        new_config_data = {}
        new_config_data['deployment'] = {}
        new_config_data['deployment']['hosts'] = []
        new_config_data['deployment']['roles'] = {}
        new_config_data['deployment']['variables'] = {}

        role_list = {}

        if config.get('ansible_ssh_user', False):
            new_config_data['deployment']['ansible_ssh_user'] = config['ansible_ssh_user']

        if config.get('variant', False):
            new_config_data['variant'] = config['variant']

        if config.get('variant_version', False):
            new_config_data['variant_version'] = config['variant_version']

        for host in config['hosts']:
            host_props = {}
            host_props['roles'] = []
            host_props['connect_to'] = host['connect_to']

            for prop in ['ip', 'public_ip', 'hostname', 'public_hostname', 'containerized', 'preconfigured']:
                host_props[prop] = host.get(prop, None)

            for role in ['master', 'node', 'master_lb', 'storage', 'etcd']:
                if host.get(role, False):
                    host_props['roles'].append(role)
                    role_list[role] = ''

            new_config_data['deployment']['hosts'].append(host_props)

        new_config_data['deployment']['roles'] = role_list

        return new_config_data

    def _set_defaults(self):
        installer_log.debug("Setting defaults, current OOConfig settings: %s", self.settings)

        if 'ansible_inventory_directory' not in self.settings:
            self.settings['ansible_inventory_directory'] = self._default_ansible_inv_dir()

        if not os.path.exists(self.settings['ansible_inventory_directory']):
            installer_log.debug("'ansible_inventory_directory' does not exist, "
                                "creating it now (%s)",
                                self.settings['ansible_inventory_directory'])
            os.makedirs(self.settings['ansible_inventory_directory'])
        else:
            installer_log.debug("We think this 'ansible_inventory_directory' "
                                "is OK: %s",
                                self.settings['ansible_inventory_directory'])

        if 'ansible_plugins_directory' not in self.settings:
            self.settings['ansible_plugins_directory'] = \
                resource_filename(__name__, 'ansible_plugins')
            installer_log.debug("We think the ansible plugins directory should be: %s (it is not already set)",
                                self.settings['ansible_plugins_directory'])
        else:
            installer_log.debug("The ansible plugins directory is already set: %s",
                                self.settings['ansible_plugins_directory'])

        if 'version' not in self.settings:
            self.settings['version'] = 'v2'

        if 'ansible_callback_facts_yaml' not in self.settings:
            installer_log.debug("No 'ansible_callback_facts_yaml' in self.settings")
            self.settings['ansible_callback_facts_yaml'] = '%s/callback_facts.yaml' % \
                self.settings['ansible_inventory_directory']
            installer_log.debug("Value: %s", self.settings['ansible_callback_facts_yaml'])
        else:
            installer_log.debug("'ansible_callback_facts_yaml' already set "
                                "in self.settings: %s",
                                self.settings['ansible_callback_facts_yaml'])

        if 'ansible_ssh_user' not in self.settings:
            self.settings['ansible_ssh_user'] = ''

        if 'ansible_inventory_path' not in self.settings:
            self.settings['ansible_inventory_path'] = \
                '{}/hosts'.format(os.path.dirname(self.config_path))

        # clean up any empty sets
        empty_keys = []
        for setting in self.settings:
            if not self.settings[setting]:
                empty_keys.append(setting)
        for key in empty_keys:
            self.settings.pop(key)

        installer_log.debug("Updated OOConfig settings: %s", self.settings)

    def _default_ansible_inv_dir(self):
        return os.path.normpath(
            os.path.dirname(self.config_path) + "/.ansible")

    def calc_missing_facts(self):
        """
        Determine which host facts are not defined in the config.

        Returns a hash of host to a list of the missing facts.
        """
        result = {}

        for host in self.deployment.hosts:
            missing_facts = []
            if host.preconfigured:
                required_facts = PRECONFIGURED_REQUIRED_FACTS
            else:
                required_facts = DEFAULT_REQUIRED_FACTS

            for required_fact in required_facts:
                if not getattr(host, required_fact):
                    missing_facts.append(required_fact)
            if len(missing_facts) > 0:
                result[host.connect_to] = missing_facts
        return result

    def save_to_disk(self):
        out_file = open(self.config_path, 'w')
        out_file.write(self.yaml())
        out_file.close()

    def persist_settings(self):
        p_settings = {}

        for setting in CONFIG_PERSIST_SETTINGS:
            if setting in self.settings and self.settings[setting]:
                p_settings[setting] = self.settings[setting]

        p_settings['deployment'] = {}
        p_settings['deployment']['hosts'] = []
        p_settings['deployment']['roles'] = {}

        for host in self.deployment.hosts:
            p_settings['deployment']['hosts'].append(host.to_dict())

        for name, role in self.deployment.roles.items():
            p_settings['deployment']['roles'][name] = role.variables

        for setting in self.deployment.variables:
            if setting not in DEPLOYMENT_VARIABLES_BLACKLIST:
                p_settings['deployment'][setting] = self.deployment.variables[setting]

        try:
            p_settings['variant'] = self.settings['variant']
            p_settings['variant_version'] = self.settings['variant_version']

            if self.settings['ansible_inventory_directory'] != self._default_ansible_inv_dir():
                p_settings['ansible_inventory_directory'] = self.settings['ansible_inventory_directory']
        except KeyError as e:
            print("Error persisting settings: {}".format(e))
            sys.exit(0)

        return p_settings

    def yaml(self):
        return yaml.safe_dump(self.persist_settings(), default_flow_style=False)

    def __str__(self):
        return self.yaml()

    def get_host_roles_set(self):
        roles_set = set()
        for host in self.deployment.hosts:
            for role in host.roles:
                roles_set.add(role)

        return roles_set
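
To make `_upgrade_v1_config()` above concrete, here is a self-contained sketch (trimmed to a few fields; host values hypothetical) of the v1-to-v2 shape change: per-host `master`/`node` booleans become a `roles` list, and everything moves under a `deployment` key:

```
v1 = {
    'version': 'v1',
    'ansible_ssh_user': 'root',
    'variant': 'openshift-enterprise',
    'hosts': [{'connect_to': '10.0.0.1', 'ip': '10.0.0.1',
               'master': True, 'node': True}],
}

def upgrade_v1(config):
    hosts, roles = [], {}
    for host in config['hosts']:
        props = {'connect_to': host['connect_to'], 'ip': host.get('ip')}
        props['roles'] = [r for r in ('master', 'node', 'master_lb', 'storage', 'etcd')
                          if host.get(r)]
        for role in props['roles']:
            roles[role] = ''
        hosts.append(props)
    return {'version': 'v2',
            'variant': config['variant'],
            'deployment': {'ansible_ssh_user': config['ansible_ssh_user'],
                           'hosts': hosts,
                           'roles': roles}}

print(upgrade_v1(v1)['deployment']['hosts'][0]['roles'])  # ['master', 'node']
```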

View File

@@ -1,338 +0,0 @@
# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,global-statement,global-variable-not-assigned
from __future__ import (absolute_import, print_function)
import socket
import subprocess
import sys
import os
import logging
import yaml
from ooinstall.variants import find_variant
from ooinstall.utils import debug_env
installer_log = logging.getLogger('installer')
CFG = None
ROLES_TO_GROUPS_MAP = {
'master': 'masters',
'node': 'nodes',
'etcd': 'etcd',
'storage': 'nfs',
'master_lb': 'lb'
}
VARIABLES_MAP = {
'ansible_ssh_user': 'ansible_ssh_user',
'deployment_type': 'deployment_type',
'variant_subtype': 'deployment_subtype',
'master_routingconfig_subdomain': 'openshift_master_default_subdomain',
'proxy_http': 'openshift_http_proxy',
'proxy_https': 'openshift_https_proxy',
'proxy_exclude_hosts': 'openshift_no_proxy',
}
HOST_VARIABLES_MAP = {
'ip': 'openshift_ip',
'public_ip': 'openshift_public_ip',
'hostname': 'openshift_hostname',
'public_hostname': 'openshift_public_hostname',
'containerized': 'containerized',
}
def set_config(cfg):
global CFG
CFG = cfg
def generate_inventory(hosts):
global CFG
new_nodes = [host for host in hosts if host.is_node() and host.new_host]
scaleup = len(new_nodes) > 0
lb = determine_lb_configuration(hosts)
base_inventory_path = CFG.settings['ansible_inventory_path']
base_inventory = open(base_inventory_path, 'w')
write_inventory_children(base_inventory, scaleup)
write_inventory_vars(base_inventory, lb)
# write_inventory_hosts
for role in CFG.deployment.roles:
# write group block
group = ROLES_TO_GROUPS_MAP.get(role, role)
base_inventory.write("\n[{}]\n".format(group))
# write each host
group_hosts = [host for host in hosts if role in host.roles]
for host in group_hosts:
schedulable = host.is_schedulable_node(hosts)
write_host(host, role, base_inventory, schedulable)
if scaleup:
base_inventory.write('\n[new_nodes]\n')
for node in new_nodes:
write_host(node, 'new_nodes', base_inventory)
base_inventory.close()
return base_inventory_path
def determine_lb_configuration(hosts):
lb = next((host for host in hosts if host.is_master_lb()), None)
if lb:
if lb.hostname is None:
lb.hostname = lb.connect_to
lb.public_hostname = lb.connect_to
return lb
def write_inventory_children(base_inventory, scaleup):
global CFG
base_inventory.write('\n[OSEv3:children]\n')
for role in CFG.deployment.roles:
child = ROLES_TO_GROUPS_MAP.get(role, role)
base_inventory.write('{}\n'.format(child))
if scaleup:
base_inventory.write('new_nodes\n')
# pylint: disable=too-many-branches
def write_inventory_vars(base_inventory, lb):
global CFG
base_inventory.write('\n[OSEv3:vars]\n')
for variable, value in CFG.settings.items():
inventory_var = VARIABLES_MAP.get(variable, None)
if inventory_var and value:
base_inventory.write('{}={}\n'.format(inventory_var, value))
for variable, value in CFG.deployment.variables.items():
inventory_var = VARIABLES_MAP.get(variable, variable)
if value:
base_inventory.write('{}={}\n'.format(inventory_var, value))
if CFG.deployment.variables['ansible_ssh_user'] != 'root':
base_inventory.write('ansible_become=yes\n')
base_inventory.write('openshift_hostname_check=false\n')
if lb is not None:
base_inventory.write("openshift_master_cluster_hostname={}\n".format(lb.hostname))
base_inventory.write(
"openshift_master_cluster_public_hostname={}\n".format(lb.public_hostname))
if CFG.settings.get('variant_version', None) == '3.1':
# base_inventory.write('openshift_image_tag=v{}\n'.format(CFG.settings.get('variant_version')))
base_inventory.write('openshift_image_tag=v{}\n'.format('3.1.1.6'))
write_proxy_settings(base_inventory)
# Find the correct deployment type for ansible:
ver = find_variant(CFG.settings['variant'],
version=CFG.settings.get('variant_version', None))[1]
base_inventory.write('deployment_type={}\n'.format(ver.ansible_key))
if getattr(ver, 'variant_subtype', False):
base_inventory.write('deployment_subtype={}\n'.format(ver.deployment_subtype))
if 'OO_INSTALL_ADDITIONAL_REGISTRIES' in os.environ:
base_inventory.write('openshift_docker_additional_registries={}\n'.format(
os.environ['OO_INSTALL_ADDITIONAL_REGISTRIES']))
if 'OO_INSTALL_INSECURE_REGISTRIES' in os.environ:
base_inventory.write('openshift_docker_insecure_registries={}\n'.format(
os.environ['OO_INSTALL_INSECURE_REGISTRIES']))
if 'OO_INSTALL_PUDDLE_REPO' in os.environ:
# We have to double the '{' here for literals
base_inventory.write("openshift_additional_repos=[{{'id': 'ose-devel', "
"'name': 'ose-devel', "
"'baseurl': '{}', "
"'enabled': 1, 'gpgcheck': 0}}]\n".format(os.environ['OO_INSTALL_PUDDLE_REPO']))
for name, role_obj in CFG.deployment.roles.items():
if role_obj.variables:
group_name = ROLES_TO_GROUPS_MAP.get(name, name)
base_inventory.write("\n[{}:vars]\n".format(group_name))
for variable, value in role_obj.variables.items():
inventory_var = VARIABLES_MAP.get(variable, variable)
if value:
base_inventory.write('{}={}\n'.format(inventory_var, value))
base_inventory.write("\n")
def write_proxy_settings(base_inventory):
try:
base_inventory.write("openshift_http_proxy={}\n".format(
CFG.settings['openshift_http_proxy']))
except KeyError:
pass
try:
base_inventory.write("openshift_https_proxy={}\n".format(
CFG.settings['openshift_https_proxy']))
except KeyError:
pass
try:
base_inventory.write("openshift_no_proxy={}\n".format(
CFG.settings['openshift_no_proxy']))
except KeyError:
pass
def write_host(host, role, inventory, schedulable=None):
global CFG
if host.preconfigured:
return
facts = ''
for prop in HOST_VARIABLES_MAP:
if getattr(host, prop):
facts += ' {}={}'.format(HOST_VARIABLES_MAP.get(prop), getattr(host, prop))
if host.other_variables:
for variable, value in host.other_variables.items():
facts += " {}={}".format(variable, value)
if host.node_labels and role == 'node':
facts += ' openshift_node_labels="{}"'.format(host.node_labels)
# Distinguish between three states, no schedulability specified (use default),
# explicitly set to True, or explicitly set to False:
if role != 'node' or schedulable is None:
pass
else:
facts += " openshift_schedulable={}".format(schedulable)
installer_host = socket.gethostname()
if installer_host in [host.connect_to, host.hostname, host.public_hostname]:
facts += ' ansible_connection=local'
if os.geteuid() != 0:
no_pwd_sudo = subprocess.call(['sudo', '-n', 'echo', '-n'])
if no_pwd_sudo == 1:
print('The atomic-openshift-installer requires sudo access without a password.')
sys.exit(1)
facts += ' ansible_become=yes'
inventory.write('{} {}\n'.format(host.connect_to, facts))
def load_system_facts(inventory_file, os_facts_path, env_vars, verbose=False):
"""
Retrieves system facts from the remote systems.
"""
installer_log.debug("Inside load_system_facts")
installer_log.debug("load_system_facts will run with Ansible/Openshift environment variables:")
debug_env(env_vars)
FNULL = open(os.devnull, 'w')
args = ['ansible-playbook', '-v'] if verbose \
else ['ansible-playbook']
args.extend([
'--inventory-file={}'.format(inventory_file),
os_facts_path])
installer_log.debug("Going to subprocess out to ansible now with these args: %s", ' '.join(args))
installer_log.debug("Subprocess will run with Ansible/Openshift environment variables:")
debug_env(env_vars)
status = subprocess.call(args, env=env_vars, stdout=FNULL)
if status != 0:
installer_log.debug("Exit status from subprocess was not 0")
return [], 1
with open(CFG.settings['ansible_callback_facts_yaml'], 'r') as callback_facts_file:
installer_log.debug("Going to try to read this file: %s", CFG.settings['ansible_callback_facts_yaml'])
try:
callback_facts = yaml.safe_load(callback_facts_file)
except yaml.YAMLError as exc:
print("Error in {}".format(CFG.settings['ansible_callback_facts_yaml']), exc)
print("Try deleting and rerunning the atomic-openshift-installer")
sys.exit(1)
return callback_facts, 0
def default_facts(hosts, verbose=False):
global CFG
installer_log.debug("Current global CFG vars here: %s", CFG)
inventory_file = generate_inventory(hosts)
os_facts_path = '{}/playbooks/byo/openshift_facts.yml'.format(CFG.ansible_playbook_directory)
facts_env = os.environ.copy()
facts_env["OO_INSTALL_CALLBACK_FACTS_YAML"] = CFG.settings['ansible_callback_facts_yaml']
facts_env["ANSIBLE_CALLBACK_PLUGINS"] = CFG.settings['ansible_plugins_directory']
if 'ansible_log_path' in CFG.settings:
facts_env["ANSIBLE_LOG_PATH"] = CFG.settings['ansible_log_path']
if 'ansible_config' in CFG.settings:
facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
installer_log.debug("facts_env: %s", facts_env)
installer_log.debug("Going to 'load_system_facts' next")
return load_system_facts(inventory_file, os_facts_path, facts_env, verbose)
def run_prerequisites(inventory_file, hosts, hosts_to_run_on, verbose=False):
global CFG
prerequisites_playbook_path = os.path.join(CFG.ansible_playbook_directory,
'playbooks/prerequisites.yml')
facts_env = os.environ.copy()
if 'ansible_log_path' in CFG.settings:
facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path']
# override the ansible config for prerequisites playbook run
if 'ansible_quiet_config' in CFG.settings:
facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_quiet_config']
return run_ansible(prerequisites_playbook_path, inventory_file, facts_env, verbose)
def run_main_playbook(inventory_file, hosts, hosts_to_run_on, verbose=False):
global CFG
if len(hosts_to_run_on) != len(hosts):
main_playbook_path = os.path.join(CFG.ansible_playbook_directory,
'playbooks/openshift-node/scaleup.yml')
else:
main_playbook_path = os.path.join(CFG.ansible_playbook_directory,
'playbooks/deploy_cluster.yml')
facts_env = os.environ.copy()
if 'ansible_log_path' in CFG.settings:
facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path']
# override the ansible config for our main playbook run
if 'ansible_quiet_config' in CFG.settings:
facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_quiet_config']
return run_ansible(main_playbook_path, inventory_file, facts_env, verbose)
def run_ansible(playbook, inventory, env_vars, verbose=False):
installer_log.debug("run_ansible will run with Ansible/Openshift environment variables:")
debug_env(env_vars)
args = ['ansible-playbook', '-v'] if verbose \
else ['ansible-playbook']
args.extend([
'--inventory-file={}'.format(inventory),
playbook])
installer_log.debug("Going to subprocess out to ansible now with these args: %s", ' '.join(args))
return subprocess.call(args, env=env_vars)
def run_uninstall_playbook(hosts, verbose=False):
playbook = os.path.join(CFG.settings['ansible_playbook_directory'],
'playbooks/adhoc/uninstall.yml')
inventory_file = generate_inventory(hosts)
facts_env = os.environ.copy()
if 'ansible_log_path' in CFG.settings:
facts_env['ANSIBLE_LOG_PATH'] = CFG.settings['ansible_log_path']
if 'ansible_config' in CFG.settings:
facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_config']
# override the ansible config for our main playbook run
if 'ansible_quiet_config' in CFG.settings:
facts_env['ANSIBLE_CONFIG'] = CFG.settings['ansible_quiet_config']
return run_ansible(playbook, inventory_file, facts_env, verbose)
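
Putting the writer functions together, this is roughly the INI-style inventory `generate_inventory()` emits for a one-master (also a node), two-node deployment. Illustrative only: hostnames and values are hypothetical, and the real content depends on the CFG settings and host facts.

```
EXPECTED_INVENTORY = """\
[OSEv3:children]
masters
nodes

[OSEv3:vars]
ansible_ssh_user=root
openshift_hostname_check=false
deployment_type=openshift-enterprise

[masters]
10.0.0.1 openshift_ip=10.0.0.1 openshift_hostname=master-private.example.com

[nodes]
10.0.0.1 openshift_ip=10.0.0.1 openshift_schedulable=False
10.0.0.2 openshift_ip=10.0.0.2
10.0.0.3 openshift_ip=10.0.0.3
"""
print(EXPECTED_INVENTORY)
```

Note the schedulability rule from Host.is_schedulable_node(): with one master+node host and two dedicated nodes, masters (1) != nodes (3), so the master is written with openshift_schedulable=False in the [nodes] group.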

View File

@@ -1,24 +0,0 @@
# pylint: disable=missing-docstring,invalid-name
import logging
import re
installer_log = logging.getLogger('installer')
def debug_env(env):
for k in sorted(env.keys()):
if k.startswith("OPENSHIFT") or k.startswith("ANSIBLE") or k.startswith("OO"):
# pylint: disable=logging-format-interpolation
installer_log.debug("{key}: {value}".format(
key=k, value=env[k]))
def is_valid_hostname(hostname):
if not hostname or len(hostname) > 255:
return False
if hostname[-1] == ".":
hostname = hostname[:-1] # strip exactly one dot from the right, if present
allowed = re.compile(r"(?!-)[A-Z\d-]{1,63}(?<!-)$", re.IGNORECASE)
return all(allowed.match(x) for x in hostname.split("."))
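
A few doctest-style checks of the rule implemented above (labels of 1-63 alphanumeric/hyphen characters, no leading or trailing hyphen, 255 characters overall); assumes the function above is importable from its module:

```
from ooinstall.utils import is_valid_hostname  # the function defined above

assert is_valid_hostname('node1.example.com')
assert is_valid_hostname('example.com.')           # one trailing dot is allowed
assert not is_valid_hostname('')                   # empty name
assert not is_valid_hostname('-bad.example.com')   # label starts with '-'
assert not is_valid_hostname('a' * 64 + '.com')    # label longer than 63 chars
assert not is_valid_hostname('x' * 256)            # name longer than 255 chars
```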

View File

@@ -1,92 +0,0 @@
# TODO: Temporarily disabled due to importing old code into openshift-ansible
# repo. We will work on these over time.
# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name,too-few-public-methods
"""
Defines the supported variants and versions the installer supports, and metadata
required to run Ansible correctly.
This module needs to be updated for each major release to allow the new version
to be specified by the user, and to point the generic variants to the latest
version.
"""
import logging
installer_log = logging.getLogger('installer')
class Version(object):
def __init__(self, name, ansible_key, subtype=''):
self.name = name # i.e. 3.0, 3.1
self.ansible_key = ansible_key
self.subtype = subtype
class Variant(object):
def __init__(self, name, description, versions):
# Supported variant name:
self.name = name
# Friendly name for the variant:
self.description = description
self.versions = versions
def latest_version(self):
return self.versions[0]
# WARNING: Keep the versions ordered, most recent first:
OSE = Variant('openshift-enterprise', 'OpenShift Container Platform', [
Version('3.9', 'openshift-enterprise'),
])
REG = Variant('openshift-enterprise', 'Registry', [
Version('3.9', 'openshift-enterprise', 'registry'),
])
origin = Variant('origin', 'OpenShift Origin', [
Version('3.9', 'origin'),
])
LEGACY = Variant('openshift-enterprise', 'OpenShift Container Platform', [
Version('3.7', 'openshift-enterprise'),
Version('3.6', 'openshift-enterprise'),
Version('3.5', 'openshift-enterprise'),
Version('3.4', 'openshift-enterprise'),
Version('3.3', 'openshift-enterprise'),
Version('3.2', 'openshift-enterprise'),
Version('3.1', 'openshift-enterprise'),
Version('3.0', 'openshift-enterprise'),
])
# Ordered list of variants we can install, first is the default.
SUPPORTED_VARIANTS = (OSE, REG, origin, LEGACY)
DISPLAY_VARIANTS = (OSE, REG, origin)
def find_variant(name, version=None):
"""
Locate the variant object for the variant given in config file, and
the correct version to use for it.
Return (None, None) if we can't find a match.
"""
prod = None
for prod in SUPPORTED_VARIANTS:
if prod.name == name:
if version is None:
return (prod, prod.latest_version())
for v in prod.versions:
if v.name == version:
return (prod, v)
return (None, None)
def get_variant_version_combos():
combos = []
for variant in DISPLAY_VARIANTS:
for ver in variant.versions:
combos.append((variant, ver))
return combos
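
Usage sketch for the lookup helpers above (assumes the ooinstall package is importable; outputs follow the tables as defined in this file):

```
from ooinstall.variants import find_variant, get_variant_version_combos

# No version given: the first name match wins and its latest version is used.
variant, version = find_variant('openshift-enterprise')
print(variant.description, version.name)      # OpenShift Container Platform 3.9

# Older versions fall through to the LEGACY entry with the same name.
variant, version = find_variant('openshift-enterprise', version='3.5')
print(version.ansible_key)                    # openshift-enterprise

# Unknown combinations come back as (None, None).
print(find_variant('origin', version='3.1'))  # (None, None)

# All user-facing choices, most recent first within each variant:
print([(v.description, ver.name) for v, ver in get_variant_version_combos()])
```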

View File

File diff suppressed because it is too large

View File

@@ -1,254 +0,0 @@
# pylint: disable=missing-docstring
import os
import yaml
import ooinstall.cli_installer as cli
from test.oo_config_tests import OOInstallFixture
from click.testing import CliRunner
# Substitute in a product name before use:
SAMPLE_CONFIG = """
variant: %s
variant_version: 3.3
master_routingconfig_subdomain: example.com
version: v2
deployment:
ansible_ssh_user: root
hosts:
- connect_to: 10.0.0.1
ip: 10.0.0.1
hostname: master-private.example.com
public_ip: 24.222.0.1
public_hostname: master.example.com
roles:
- master
- node
- connect_to: 10.0.0.2
ip: 10.0.0.2
hostname: node1-private.example.com
public_ip: 24.222.0.2
public_hostname: node1.example.com
roles:
- node
- connect_to: 10.0.0.3
ip: 10.0.0.3
hostname: node2-private.example.com
public_ip: 24.222.0.3
public_hostname: node2.example.com
roles:
- node
roles:
master:
node:
"""
def read_yaml(config_file_path):
cfg_f = open(config_file_path, 'r')
config = yaml.safe_load(cfg_f.read())
cfg_f.close()
return config
class OOCliFixture(OOInstallFixture):
def setUp(self):
OOInstallFixture.setUp(self)
self.runner = CliRunner()
# Add any arguments you would like to test here, the defaults ensure
# we only do unattended invocations here, and using temporary files/dirs.
self.cli_args = ["-a", self.work_dir]
def run_cli(self):
return self.runner.invoke(cli.cli, self.cli_args)
def assert_result(self, result, exit_code):
if result.exit_code != exit_code:
msg = ["Unexpected result from CLI execution\n"]
msg.append("Exit code: %s\n" % result.exit_code)
msg.append("Exception: %s\n" % result.exception)
import traceback
msg.extend(traceback.format_exception(*result.exc_info))
msg.append("Output:\n%s" % result.output)
self.fail("".join(msg))
def _verify_load_facts(self, load_facts_mock):
""" Check that we ran load facts with expected inputs. """
load_facts_args = load_facts_mock.call_args[0]
self.assertEquals(os.path.join(self.work_dir, "hosts"),
load_facts_args[0])
self.assertEquals(os.path.join(self.work_dir,
"playbooks/byo/openshift_facts.yml"),
load_facts_args[1])
env_vars = load_facts_args[2]
self.assertEquals(os.path.join(self.work_dir,
'.ansible/callback_facts.yaml'),
env_vars['OO_INSTALL_CALLBACK_FACTS_YAML'])
self.assertEqual('/tmp/ansible.log', env_vars['ANSIBLE_LOG_PATH'])
def _verify_run_playbook(self, run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len):
""" Check that we ran playbook with expected inputs. """
hosts = run_playbook_mock.call_args[0][1]
hosts_to_run_on = run_playbook_mock.call_args[0][2]
self.assertEquals(exp_hosts_len, len(hosts))
self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on))
def _verify_config_hosts(self, written_config, host_count):
self.assertEquals(host_count, len(written_config['deployment']['hosts']))
for host in written_config['deployment']['hosts']:
self.assertTrue('hostname' in host)
self.assertTrue('public_hostname' in host)
if 'preconfigured' not in host:
if 'roles' in host:
self.assertTrue('node' in host['roles'] or 'storage' in host['roles'])
self.assertTrue('ip' in host)
self.assertTrue('public_ip' in host)
# pylint: disable=too-many-arguments
def _verify_get_hosts_to_run_on(self, mock_facts, load_facts_mock,
run_playbook_mock, cli_input,
exp_hosts_len=None, exp_hosts_to_run_on_len=None,
force=None):
"""
Tests cli_installer.py:get_hosts_to_run_on. That method has quite a
few subtle branches in the logic. The goal with this method is simply
to handle all the messy stuff here and allow the main test cases to be
easily read. The basic idea is to modify mock_facts to return a
version indicating OpenShift is already installed on particular hosts.
"""
load_facts_mock.return_value = (mock_facts, 0)
run_playbook_mock.return_value = 0
if cli_input:
self.cli_args.append("install")
result = self.runner.invoke(cli.cli,
self.cli_args,
input=cli_input)
else:
config_file = self.write_config(
os.path.join(self.work_dir,
'ooinstall.conf'), SAMPLE_CONFIG % 'openshift-enterprise')
self.cli_args.extend(["-c", config_file, "install"])
if force:
self.cli_args.append("--force")
result = self.runner.invoke(cli.cli, self.cli_args)
written_config = read_yaml(config_file)
self._verify_config_hosts(written_config, exp_hosts_len)
if "If you want to force reinstall" in result.output:
# verify we exited on seeing installed hosts
self.assertEqual(result.exit_code, 1)
else:
self.assert_result(result, 0)
self._verify_load_facts(load_facts_mock)
self._verify_run_playbook(run_playbook_mock, exp_hosts_len, exp_hosts_to_run_on_len)
# Make sure we ran on the expected masters and nodes:
hosts = run_playbook_mock.call_args[0][1]
hosts_to_run_on = run_playbook_mock.call_args[0][2]
self.assertEquals(exp_hosts_len, len(hosts))
self.assertEquals(exp_hosts_to_run_on_len, len(hosts_to_run_on))
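A typical caller of this helper is a test method that patches the two collaborators and passes in a facts fixture; a hypothetical example (the patch targets and the MOCK_FACTS fixture are assumptions about the surrounding test module, which is not shown here):

    # assumes: from mock import patch, and a MOCK_FACTS dict fixture
    @patch('ooinstall.openshift_ansible.run_main_playbook')
    @patch('ooinstall.openshift_ansible.load_system_facts')
    def test_get_hosts_to_run_on_fresh(self, load_facts_mock, run_playbook_mock):
        # MOCK_FACTS here would report no prior OpenShift install,
        # so all three hosts are expected to be run on:
        self._verify_get_hosts_to_run_on(MOCK_FACTS, load_facts_mock,
                                         run_playbook_mock,
                                         cli_input=None,
                                         exp_hosts_len=3,
                                         exp_hosts_to_run_on_len=3)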
# pylint: disable=too-many-arguments,too-many-branches,too-many-statements
def build_input(ssh_user=None, hosts=None, variant_num=None,
add_nodes=None, confirm_facts=None, schedulable_masters_ok=None,
master_lb=('', False), storage=None):
"""
Build an input string simulating a user entering values in an interactive
attended install.
This is intended to give us one place to update when the CLI prompts change.
We should aim to keep this dependent on optional keyword arguments with
sensible defaults to keep things from getting too fragile.
"""
inputs = [
'y', # let's proceed
]
if ssh_user:
inputs.append(ssh_user)
if variant_num:
inputs.append(str(variant_num)) # Choose variant + version
num_masters = 0
if hosts:
i = 0
for (host, is_master, is_containerized) in hosts:
inputs.append(host)
if is_master:
inputs.append('y')
num_masters += 1
else:
inputs.append('n')
if is_containerized:
inputs.append('container')
else:
inputs.append('rpm')
# inputs.append('rpm')
            # We should not be prompted to add more hosts if we're currently at
            # 2 masters; that is an invalid HA configuration, so the question
            # will not be asked, and the user must enter the next host:
if num_masters != 2:
if i < len(hosts) - 1:
if num_masters >= 1:
inputs.append('y') # Add more hosts
else:
inputs.append('n') # Done adding hosts
i += 1
# You can pass a single master_lb or a list if you intend for one to get rejected:
    if isinstance(master_lb[0], (list, tuple)):
        inputs.extend(master_lb[0])
    else:
        inputs.append(master_lb[0])
if master_lb[0]:
inputs.append('y' if master_lb[1] else 'n')
if storage:
inputs.append(storage)
inputs.append('subdomain.example.com')
inputs.append('proxy.example.com')
inputs.append('proxy-private.example.com')
inputs.append('exclude.example.com')
# TODO: support option 2, fresh install
if add_nodes:
if schedulable_masters_ok:
inputs.append('y')
inputs.append('1') # Add more nodes
i = 0
for (host, _, is_containerized) in add_nodes:
inputs.append(host)
if is_containerized:
inputs.append('container')
else:
inputs.append('rpm')
# inputs.append('rpm')
if i < len(add_nodes) - 1:
inputs.append('y') # Add more hosts
else:
inputs.append('n') # Done adding hosts
i += 1
if add_nodes is None:
total_hosts = hosts
else:
total_hosts = hosts + add_nodes
if total_hosts is not None and num_masters == len(total_hosts):
inputs.append('y')
inputs.extend([
confirm_facts,
        'y', # let's do this
'y',
])
return '\n'.join(inputs)
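
As a usage sketch, an attended-install test could feed the result straight into the CliRunner fixture above (host tuples are (address, is_master, is_containerized); the concrete values are illustrative):

    cli_input = build_input(
        ssh_user='root',
        hosts=[('10.0.0.1', True, False),    # one master, rpm-based
               ('10.0.0.2', False, False)],  # one node, rpm-based
        variant_num=1,                       # first variant/version combo
        confirm_facts='y')
    self.cli_args.append("install")
    result = self.runner.invoke(cli.cli, self.cli_args, input=cli_input)
    self.assert_result(result, 0)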


@@ -1,267 +0,0 @@
# TODO: Temporarily disabled due to importing old code into openshift-ansible
# repo. We will work on these over time.
# pylint: disable=bad-continuation,missing-docstring,no-self-use,invalid-name
import os
import unittest
import tempfile
import shutil
import yaml
from six.moves import cStringIO
from ooinstall.oo_config import OOConfig, Host, OOConfigInvalidHostError
import ooinstall.openshift_ansible
SAMPLE_CONFIG = """
variant: openshift-enterprise
variant_version: 3.3
version: v2
deployment:
ansible_ssh_user: root
hosts:
- connect_to: master-private.example.com
ip: 10.0.0.1
hostname: master-private.example.com
public_ip: 24.222.0.1
public_hostname: master.example.com
roles:
- master
- node
- connect_to: node1-private.example.com
ip: 10.0.0.2
hostname: node1-private.example.com
public_ip: 24.222.0.2
public_hostname: node1.example.com
roles:
- node
- connect_to: node2-private.example.com
ip: 10.0.0.3
hostname: node2-private.example.com
public_ip: 24.222.0.3
public_hostname: node2.example.com
roles:
- node
roles:
master:
node:
"""
CONFIG_INCOMPLETE_FACTS = """
version: v2
deployment:
ansible_ssh_user: root
hosts:
- connect_to: 10.0.0.1
ip: 10.0.0.1
hostname: master-private.example.com
public_ip: 24.222.0.1
public_hostname: master.example.com
roles:
- master
- connect_to: 10.0.0.2
ip: 10.0.0.2
hostname: 24.222.0.2
public_ip: 24.222.0.2
roles:
- node
- connect_to: 10.0.0.3
ip: 10.0.0.3
roles:
- node
roles:
master:
node:
"""
CONFIG_BAD = """
variant: openshift-enterprise
version: v2
deployment:
ansible_ssh_user: root
hosts:
- connect_to: master-private.example.com
ip: 10.0.0.1
hostname: master-private.example.com
public_ip: 24.222.0.1
public_hostname: master.example.com
roles:
- master
- node
- ip: 10.0.0.2
hostname: node1-private.example.com
public_ip: 24.222.0.2
public_hostname: node1.example.com
roles:
- node
- connect_to: node2-private.example.com
ip: 10.0.0.3
hostname: node2-private.example.com
public_ip: 24.222.0.3
public_hostname: node2.example.com
roles:
- node
roles:
master:
node:
"""
class OOInstallFixture(unittest.TestCase):
def setUp(self):
self.tempfiles = []
self.work_dir = tempfile.mkdtemp(prefix='ooconfigtests')
self.tempfiles.append(self.work_dir)
def tearDown(self):
for path in self.tempfiles:
if os.path.isdir(path):
shutil.rmtree(path)
else:
os.remove(path)
def write_config(self, path, config_str):
"""
Write given config to a temporary file which will be cleaned
up in teardown.
Returns full path to the file.
"""
        with open(path, 'w') as cfg_file:
            cfg_file.write(config_str)
        return path
class OOConfigTests(OOInstallFixture):
def test_load_config(self):
cfg_path = self.write_config(
os.path.join(self.work_dir, 'ooinstall.conf'), SAMPLE_CONFIG)
ooconfig = OOConfig(cfg_path)
self.assertEquals(3, len(ooconfig.deployment.hosts))
self.assertEquals("master-private.example.com", ooconfig.deployment.hosts[0].connect_to)
self.assertEquals("10.0.0.1", ooconfig.deployment.hosts[0].ip)
self.assertEquals("master-private.example.com", ooconfig.deployment.hosts[0].hostname)
self.assertEquals(["10.0.0.1", "10.0.0.2", "10.0.0.3"],
[host.ip for host in ooconfig.deployment.hosts])
self.assertEquals('openshift-enterprise', ooconfig.settings['variant'])
self.assertEquals('v2', ooconfig.settings['version'])
    def test_load_bad_config(self):
        cfg_path = self.write_config(
            os.path.join(self.work_dir, 'ooinstall.conf'), CONFIG_BAD)
        self.assertRaises(OOConfigInvalidHostError, OOConfig, cfg_path)
def test_load_complete_facts(self):
cfg_path = self.write_config(
os.path.join(self.work_dir, 'ooinstall.conf'), SAMPLE_CONFIG)
ooconfig = OOConfig(cfg_path)
missing_host_facts = ooconfig.calc_missing_facts()
self.assertEquals(0, len(missing_host_facts))
# Test missing optional facts the user must confirm:
def test_load_host_incomplete_facts(self):
cfg_path = self.write_config(
os.path.join(self.work_dir, 'ooinstall.conf'), CONFIG_INCOMPLETE_FACTS)
ooconfig = OOConfig(cfg_path)
missing_host_facts = ooconfig.calc_missing_facts()
self.assertEquals(2, len(missing_host_facts))
self.assertEquals(1, len(missing_host_facts['10.0.0.2']))
self.assertEquals(3, len(missing_host_facts['10.0.0.3']))
def test_write_config(self):
cfg_path = self.write_config(
os.path.join(self.work_dir, 'ooinstall.conf'), SAMPLE_CONFIG)
ooconfig = OOConfig(cfg_path)
ooconfig.save_to_disk()
        with open(cfg_path, 'r') as f:
            written_config = yaml.safe_load(f.read())
self.assertEquals(3, len(written_config['deployment']['hosts']))
for h in written_config['deployment']['hosts']:
self.assertTrue('ip' in h)
self.assertTrue('public_ip' in h)
self.assertTrue('hostname' in h)
self.assertTrue('public_hostname' in h)
self.assertTrue('ansible_ssh_user' in written_config['deployment'])
self.assertTrue('variant' in written_config)
self.assertEquals('v2', written_config['version'])
# Some advanced settings should not get written out if they
# were not specified by the user:
self.assertFalse('ansible_inventory_directory' in written_config)
class HostTests(OOInstallFixture):
def test_load_host_no_ip_or_hostname(self):
yaml_props = {
'public_ip': '192.168.0.1',
'public_hostname': 'a.example.com',
'master': True
}
self.assertRaises(OOConfigInvalidHostError, Host, **yaml_props)
def test_load_host_no_master_or_node_specified(self):
yaml_props = {
'ip': '192.168.0.1',
'hostname': 'a.example.com',
'public_ip': '192.168.0.1',
'public_hostname': 'a.example.com',
}
self.assertRaises(OOConfigInvalidHostError, Host, **yaml_props)
def test_inventory_file_quotes_node_labels(self):
"""Verify a host entry wraps openshift_node_labels value in double quotes"""
yaml_props = {
'ip': '192.168.0.1',
'hostname': 'a.example.com',
'connect_to': 'a-private.example.com',
'public_ip': '192.168.0.1',
'public_hostname': 'a.example.com',
'new_host': True,
'roles': ['node'],
'node_labels': {
'region': 'infra'
},
}
new_node = Host(**yaml_props)
inventory = cStringIO()
        # This is what the 'write_host' function generates. write_host has no
        # return value; it just writes directly to the file-like 'inventory',
        # which in this test case is a StringIO object.
ooinstall.openshift_ansible.write_host(
new_node,
'node',
inventory,
schedulable=True)
# read the value of what was written to the inventory "file"
legacy_inventory_line = inventory.getvalue()
# Given the `yaml_props` above we should see a line like this:
# openshift_node_labels="{'region': 'infra'}"
# Quotes around the hash
node_labels_expected = '''openshift_node_labels="{'region': 'infra'}"'''
# No quotes around the hash
node_labels_bad = '''openshift_node_labels={'region': 'infra'}'''
# The good line is present in the written inventory line
self.assertIn(node_labels_expected, legacy_inventory_line)
# An unquoted version is not present
self.assertNotIn(node_labels_bad, legacy_inventory_line)
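
The quoting matters because an INI inventory line is split on whitespace and the labels hash contains a space; a standalone illustration of the failure mode (shlex is only a stand-in for Ansible's tokenizer, used here to show the intent of the double quotes):

import shlex

bad = "node1 openshift_node_labels={'region': 'infra'}"
# Unquoted, plain whitespace splitting shears the hash into stray tokens:
assert bad.split() == ['node1', "openshift_node_labels={'region':", "'infra'}"]

good = '''node1 openshift_node_labels="{'region': 'infra'}"'''
# Double-quoted, shell-style splitting keeps one key=value pair intact:
assert shlex.split(good) == ['node1', "openshift_node_labels={'region': 'infra'}"]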


@@ -1,71 +0,0 @@
import os
import unittest
import tempfile
import shutil
from six.moves import configparser
from ooinstall import openshift_ansible
from ooinstall.oo_config import Host, OOConfig
BASE_CONFIG = """
---
variant: openshift-enterprise
variant_version: 3.3
version: v2
deployment:
ansible_ssh_user: cloud-user
hosts: []
roles:
master:
node:
"""
class TestOpenShiftAnsible(unittest.TestCase):
def setUp(self):
self.tempfiles = []
self.work_dir = tempfile.mkdtemp(prefix='openshift_ansible_tests')
self.configfile = os.path.join(self.work_dir, 'ooinstall.config')
with open(self.configfile, 'w') as config_file:
config_file.write(BASE_CONFIG)
self.inventory = os.path.join(self.work_dir, 'hosts')
config = OOConfig(self.configfile)
config.settings['ansible_inventory_path'] = self.inventory
openshift_ansible.set_config(config)
def tearDown(self):
shutil.rmtree(self.work_dir)
def test_generate_inventory_new_nodes(self):
        hosts = generate_hosts(1, 'master', roles=['master', 'etcd'])
hosts.extend(generate_hosts(1, 'node', roles=['node']))
hosts.extend(generate_hosts(1, 'new_node', roles=['node'], new_host=True))
openshift_ansible.generate_inventory(hosts)
inventory = configparser.ConfigParser(allow_no_value=True)
inventory.read(self.inventory)
self.assertTrue(inventory.has_section('new_nodes'))
self.assertTrue(inventory.has_option('new_nodes', 'new_node1'))
def test_write_inventory_vars_role_vars(self):
with open(self.inventory, 'w') as inv:
openshift_ansible.CFG.deployment.roles['master'].variables = {'color': 'blue'}
openshift_ansible.CFG.deployment.roles['node'].variables = {'color': 'green'}
openshift_ansible.write_inventory_vars(inv, None)
inventory = configparser.ConfigParser(allow_no_value=True)
inventory.read(self.inventory)
self.assertTrue(inventory.has_section('masters:vars'))
self.assertEquals('blue', inventory.get('masters:vars', 'color'))
self.assertTrue(inventory.has_section('nodes:vars'))
self.assertEquals('green', inventory.get('nodes:vars', 'color'))
def generate_hosts(num_hosts, name_prefix, roles=None, new_host=False):
hosts = []
for num in range(1, num_hosts + 1):
hosts.append(Host(connect_to=name_prefix + str(num),
roles=roles, new_host=new_host))
return hosts
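
For orientation, the two new_nodes assertions in test_generate_inventory_new_nodes mean the written hosts file contains at least a section like the following (a sketch grounded only in those assertions; the surrounding masters/nodes sections are omitted):

    [new_nodes]
    new_node1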


@@ -1,98 +0,0 @@
"""
Unittests for ooinstall utils.
"""
import unittest
import copy
import mock
import six
from ooinstall.utils import debug_env, is_valid_hostname
class TestUtils(unittest.TestCase):
"""
Parent unittest TestCase.
"""
def setUp(self):
self.debug_all_params = {
'OPENSHIFT_FOO': 'bar',
'ANSIBLE_FOO': 'bar',
'OO_FOO': 'bar'
}
self.expected = [
mock.call('ANSIBLE_FOO: bar'),
mock.call('OPENSHIFT_FOO: bar'),
mock.call('OO_FOO: bar'),
]
######################################################################
# Validate ooinstall.utils.debug_env functionality
def test_utils_debug_env_all_debugged(self):
"""Verify debug_env debugs specific env variables"""
with mock.patch('ooinstall.utils.installer_log') as _il:
debug_env(self.debug_all_params)
# Debug was called for each item we expect
self.assertEqual(
len(self.debug_all_params),
_il.debug.call_count)
# Each item we expect was logged
six.assertCountEqual(
self,
self.expected,
_il.debug.call_args_list)
def test_utils_debug_env_some_debugged(self):
"""Verify debug_env skips non-wanted env variables"""
debug_some_params = copy.deepcopy(self.debug_all_params)
# This will not be logged by debug_env
debug_some_params['MG_FRBBR'] = "SKIPPED"
with mock.patch('ooinstall.utils.installer_log') as _il:
debug_env(debug_some_params)
# The actual number of debug calls was less than the
# number of items passed to debug_env
self.assertLess(
_il.debug.call_count,
len(debug_some_params))
six.assertCountEqual(
self,
self.expected,
_il.debug.call_args_list)
######################################################################
def test_utils_is_valid_hostname_invalid(self):
"""Verify is_valid_hostname can detect None or too-long hostnames"""
# A hostname that's empty, None, or more than 255 chars is invalid
empty_hostname = ''
res = is_valid_hostname(empty_hostname)
self.assertFalse(res)
none_hostname = None
res = is_valid_hostname(none_hostname)
self.assertFalse(res)
too_long_hostname = "a" * 256
res = is_valid_hostname(too_long_hostname)
self.assertFalse(res)
def test_utils_is_valid_hostname_ends_with_dot(self):
"""Verify is_valid_hostname can parse hostnames with trailing periods"""
hostname = "foo.example.com."
res = is_valid_hostname(hostname)
self.assertTrue(res)
def test_utils_is_valid_hostname_normal_hostname(self):
"""Verify is_valid_hostname can parse regular hostnames"""
hostname = "foo.example.com"
res = is_valid_hostname(hostname)
self.assertTrue(res)
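
Taken together, these hostname tests pin down the contract: empty, None, or over-255-character names are rejected, while a trailing dot is allowed. A minimal implementation satisfying them might look like the following (an assumption for illustration, not the shipped ooinstall.utils code):

import re

def is_valid_hostname(hostname):
    # Empty, None, or longer than 255 characters is invalid:
    if not hostname or len(hostname) > 255:
        return False
    # A single trailing dot (absolute FQDN form) is permitted:
    if hostname.endswith('.'):
        hostname = hostname[:-1]
    # Each dot-separated label: 1-63 alphanumerics/hyphens,
    # neither starting nor ending with a hyphen:
    label = re.compile(r'^(?!-)[A-Za-z0-9-]{1,63}(?<!-)$')
    return all(label.match(piece) for piece in hostname.split('.'))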