Add standard tool environment
Makefile (Normal file, 124 lines)
@@ -0,0 +1,124 @@
# Old-skool build tools.
#
# Targets (see each target for more information):
#   all: Build code.
#   build: Build code.
#   check: Run verify, build, unit tests and cmd tests.
#   test: Run all tests.
#   run: Run all-in-one server
#   clean: Clean up.

OUT_DIR = _output
OS_OUTPUT_GOPATH ?= 1

export GOFLAGS
export TESTFLAGS
# If set to 1, create an isolated GOPATH inside _output using symlinks to avoid
# other packages being accidentally included. Defaults to on.
export OS_OUTPUT_GOPATH
# May be used to set additional arguments passed to the image build commands for
# mounting secrets specific to a build environment.
export OS_BUILD_IMAGE_ARGS

# Tests run using `make` are most often run by the CI system, so we are OK to
# assume the user wants jUnit output and will turn it off if they don't.
JUNIT_REPORT ?= true

# Build code.
#
# Args:
#   WHAT: Directory names to build. If any of these directories has a 'main'
#     package, the build will produce executable files under $(OUT_DIR)/local/bin.
#     If not specified, "everything" will be built.
#   GOFLAGS: Extra flags to pass to 'go' when building.
#   TESTFLAGS: Extra flags that should only be passed to hack/test-go.sh
#
# Example:
#   make
#   make all
#   make all WHAT=cmd/oc GOFLAGS=-v
all build:
	hack/build-go.sh $(WHAT) $(GOFLAGS)
.PHONY: all build

# Run core verification and all self contained tests.
#
# Example:
#   make check
check: | verify test-unit
.PHONY: check


# Verify code conventions are properly setup.
#
# Example:
#   make verify
verify:
	{ \
	hack/verify-gofmt.sh || r=1; \
	hack/verify-govet.sh || r=1; \
	hack/verify-imports.sh || r=1; \
	exit $$r ; \
	}
.PHONY: verify


# Verify commit comments.
#
# Example:
#   make verify-commits
verify-commits:
	hack/verify-upstream-commits.sh
.PHONY: verify-commits

# Run unit tests.
#
# Args:
#   WHAT: Directory names to test. All *_test.go files under these
#     directories will be run. If not specified, "everything" will be tested.
#   TESTS: Same as WHAT.
#   GOFLAGS: Extra flags to pass to 'go' when building.
#   TESTFLAGS: Extra flags that should only be passed to hack/test-go.sh
#
# Example:
#   make test-unit
#   make test-unit WHAT=pkg/build TESTFLAGS=-v
test-unit:
	GOTEST_FLAGS="$(TESTFLAGS)" hack/test-go.sh $(WHAT) $(TESTS)
.PHONY: test-unit

# Remove all build artifacts.
#
# Example:
#   make clean
clean:
	rm -rf $(OUT_DIR)
.PHONY: clean

# Build the cross compiled release binaries
#
# Example:
#   make build-cross
build-cross:
	hack/build-cross.sh
.PHONY: build-cross

# Build RPMs only for the Linux AMD64 target
#
# Args:
#
# Example:
#   make build-rpms
build-rpms:
	OS_ONLY_BUILD_PLATFORMS='linux/amd64' hack/build-rpms.sh
.PHONY: build-rpms

# Build images from the official RPMs
#
# Args:
#
# Example:
#   make build-images
build-images: build-rpms
	hack/build-images.sh
.PHONY: build-images
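For orientation, a few typical invocations of the targets above. The WHAT values are illustrative package paths, not something this Makefile prescribes:

  # Build one component verbosely; WHAT and GOFLAGS are forwarded to hack/build-go.sh
  make all WHAT=cmd/dockerregistry GOFLAGS=-v

  # Run the verification chain and unit tests the same way CI does
  make check

  # Unit-test one package; pass JUNIT_REPORT through the environment to disable jUnit output
  JUNIT_REPORT=false make test-unit WHAT=pkg/build TESTFLAGS=-v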
hack/OWNERS (Normal file, 2 lines)
@@ -0,0 +1,2 @@
reviewers:
approvers:
hack/boilerplate.txt (Normal file, 0 lines)
hack/build-cross.sh (Executable file, 95 lines)
@@ -0,0 +1,95 @@
#!/bin/bash

# Build all cross compile targets and the base binaries
STARTTIME=$(date +%s)
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"

host_platform="$(os::build::host_platform)"

# by default, build for these platforms
platforms=(
	linux/amd64
	darwin/amd64
	windows/amd64
)
image_platforms=( )
test_platforms=( "${host_platform}" )

targets=( "${OS_CROSS_COMPILE_TARGETS[@]}" )

# Special case ppc64le
if [[ "${host_platform}" == "linux/ppc64le" ]]; then
	platforms+=( "linux/ppc64le" )
fi

# Special case arm64
if [[ "${host_platform}" == "linux/arm64" ]]; then
	platforms+=( "linux/arm64" )
fi

# Special case s390x
if [[ "${host_platform}" == "linux/s390x" ]]; then
	platforms+=( "linux/s390x" )
fi

# On linux platforms, build images
if [[ "${host_platform}" == linux/* ]]; then
	image_platforms+=( "${host_platform}" )
fi

# filter platform list
if [[ -n "${OS_ONLY_BUILD_PLATFORMS-}" ]]; then
	filtered=( )
	for platform in "${platforms[@]}"; do
		if [[ "${platform}" =~ "${OS_ONLY_BUILD_PLATFORMS}" ]]; then
			filtered+=("${platform}")
		fi
	done
	platforms=("${filtered[@]+"${filtered[@]}"}")

	filtered=( )
	for platform in "${image_platforms[@]}"; do
		if [[ "${platform}" =~ "${OS_ONLY_BUILD_PLATFORMS}" ]]; then
			filtered+=("${platform}")
		fi
	done
	image_platforms=("${filtered[@]+"${filtered[@]}"}")

	filtered=( )
	for platform in "${test_platforms[@]}"; do
		if [[ "${platform}" =~ "${OS_ONLY_BUILD_PLATFORMS}" ]]; then
			filtered+=("${platform}")
		fi
	done
	test_platforms=("${filtered[@]+"${filtered[@]}"}")
fi

# Build image binaries for a subset of platforms. Image binaries are currently
# linux-only, and a subset of them are compiled with flags to make them static
# for use in Docker images "FROM scratch".
OS_BUILD_PLATFORMS=("${image_platforms[@]+"${image_platforms[@]}"}")
os::build::build_static_binaries "${OS_SCRATCH_IMAGE_COMPILE_TARGETS_LINUX[@]-}"
os::build::build_binaries "${OS_IMAGE_COMPILE_TARGETS_LINUX[@]-}"

# Build the primary client/server for all platforms
OS_BUILD_PLATFORMS=("${platforms[@]+"${platforms[@]}"}")
os::build::build_binaries "${OS_CROSS_COMPILE_TARGETS[@]-}"

# Build the test binaries for the host platform
OS_BUILD_PLATFORMS=("${test_platforms[@]+"${test_platforms[@]}"}")
os::build::build_binaries "${OS_TEST_TARGETS[@]-}"

# Place binaries only
OS_BUILD_PLATFORMS=("${platforms[@]+"${platforms[@]}"}") \
	os::build::place_bins "${OS_CROSS_COMPILE_BINARIES[@]-}"
OS_BUILD_PLATFORMS=("${image_platforms[@]+"${image_platforms[@]}"}") \
	os::build::place_bins "${OS_IMAGE_COMPILE_BINARIES[@]-}"

if [[ "${OS_GIT_TREE_STATE:-dirty}" == "clean" ]]; then
	# only when we are building from a clean state can we claim to
	# have created a valid set of binaries that can resemble a release
	mkdir -p "${OS_OUTPUT_RELEASEPATH}"
	echo "${OS_GIT_COMMIT}" > "${OS_OUTPUT_RELEASEPATH}/.commit"
fi

ret=$?; ENDTIME=$(date +%s); echo "$0 took $(($ENDTIME - $STARTTIME)) seconds"; exit "$ret"
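A minimal sketch of the filter above, runnable on its own: because the right-hand side of =~ is quoted, OS_ONLY_BUILD_PLATFORMS is matched as a literal substring, so a value like linux keeps every linux/* platform:

  platforms=( linux/amd64 darwin/amd64 windows/amd64 )
  OS_ONLY_BUILD_PLATFORMS='linux'
  filtered=( )
  for platform in "${platforms[@]}"; do
    if [[ "${platform}" =~ "${OS_ONLY_BUILD_PLATFORMS}" ]]; then
      filtered+=( "${platform}" )
    fi
  done
  printf '%s\n' "${filtered[@]}"   # prints only linux/amd64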
hack/build-go.sh (Executable file, 21 lines)
@@ -0,0 +1,21 @@
#!/bin/bash

# This script sets up a go workspace locally and builds all go components.
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"

function cleanup() {
	return_code=$?
	os::util::describe_return_code "${return_code}"
	exit "${return_code}"
}
trap "cleanup" EXIT

build_targets=("$@")
if [[ $# -eq 0 ]]; then
	build_targets=("${OS_CROSS_COMPILE_TARGETS[@]}")
fi

platform="$(os::build::host_platform)"
OS_BUILD_PLATFORMS=("${OS_BUILD_PLATFORMS[@]:-${platform}}")
os::build::build_binaries "${build_targets[@]}"
os::build::place_bins "${build_targets[@]}"
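A hedged usage sketch; the target path is illustrative:

  # Build one target for the host platform
  hack/build-go.sh cmd/dockerregistry

  # Cross-compile the same target; OS_BUILD_PLATFORMS is consumed above
  OS_BUILD_PLATFORMS='linux/amd64' hack/build-go.sh cmd/dockerregistry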
hack/build-images.sh (Executable file, 25 lines)
@@ -0,0 +1,25 @@
#!/bin/bash

# This script builds all images locally except the base and release images,
# which are handled by hack/build-base-images.sh.

# NOTE: you only need to run this script if your code changes are part of
# any images OpenShift runs internally such as origin-sti-builder, origin-docker-builder,
# origin-deployer, etc.
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"

function cleanup() {
	return_code=$?
	os::util::describe_return_code "${return_code}"
	exit "${return_code}"
}
trap "cleanup" EXIT

os::util::ensure::gopath_binary_exists imagebuilder
# image builds require RPMs to have been built
os::build::release::check_for_rpms

# we need to mount RPMs into the container builds for installation
OS_BUILD_IMAGE_ARGS="${OS_BUILD_IMAGE_ARGS:-} -mount ${OS_OUTPUT_RPMPATH}/:/srv/origin-local-release/"

os::build::images
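Since image builds require the RPM repository produced by hack/build-rpms.sh (enforced by os::build::release::check_for_rpms above), the usual sequence mirrors the Makefile dependency:

  make build-rpms
  make build-images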
hack/build-rpms.sh (Executable file, 131 lines)
@@ -0,0 +1,131 @@
#!/bin/bash

# This script generates release zips and RPMs into _output/releases.
# tito and other build dependencies are required on the host. We will
# be running `hack/build-cross.sh` under the covers, so we transitively
# consume all of the relevant envars.
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"

function cleanup() {
	return_code=$?
	os::util::describe_return_code "${return_code}"
	exit "${return_code}"
}
trap "cleanup" EXIT

# check whether we are in a clean output state
dirty="$( if [[ -d "${OS_OUTPUT}" ]]; then echo '1'; fi )"

os::util::ensure::system_binary_exists rpmbuild
os::util::ensure::system_binary_exists createrepo
os::build::setup_env

if [[ "${OS_ONLY_BUILD_PLATFORMS:-}" == 'linux/amd64' ]]; then
	# when the user is asking for only Linux binaries, we will
	# furthermore not build cross-platform clients in tito
	make_redistributable=0
else
	make_redistributable=1
fi
if [[ -n "${OS_BUILD_SRPM-}" ]]; then
	srpm="a"
else
	srpm="b"
fi


os::build::rpm::get_nvra_vars

OS_RPM_SPECFILE="$( find "${OS_ROOT}" -name '*.spec' )"
OS_RPM_NAME="$( rpmspec -q --qf '%{name}\n' "${OS_RPM_SPECFILE}" | head -1 )"

os::log::info "Building release RPMs for ${OS_RPM_SPECFILE} ..."

rpm_tmp_dir="${BASETMPDIR}/rpm"

# RPM requires the spec file be owned by the invoking user
chown "$(id -u):$(id -g)" "${OS_RPM_SPECFILE}" || true

if [[ -n "${dirty}" && "${OS_GIT_TREE_STATE}" == "dirty" ]]; then
	os::log::warning "Repository is not clean, performing fast build and reusing _output"

	# build and output from source to destination
	mkdir -p "${rpm_tmp_dir}"
	ln -fs "${OS_ROOT}" "${rpm_tmp_dir}/SOURCES"
	ln -fs "${OS_ROOT}" "${rpm_tmp_dir}/BUILD"

	rpmbuild -bb "${OS_RPM_SPECFILE}" \
		--define "_sourcedir ${rpm_tmp_dir}/SOURCES" \
		--define "_builddir ${rpm_tmp_dir}/BUILD" \
		--define "skip_prep 1" \
		--define "skip_dist 1" \
		--define "make_redistributable ${make_redistributable}" \
		--define "version ${OS_RPM_VERSION}" --define "release ${OS_RPM_RELEASE}" \
		--define "commit ${OS_GIT_COMMIT}" \
		--define "os_git_vars ${OS_RPM_GIT_VARS}" \
		--define 'dist .el7' --define "_topdir ${rpm_tmp_dir}"

	mkdir -p "${OS_OUTPUT_RPMPATH}"
	mv -f "${rpm_tmp_dir}"/RPMS/*/*.rpm "${OS_OUTPUT_RPMPATH}"

else
	mkdir -p "${rpm_tmp_dir}/SOURCES"
	tar czf "${rpm_tmp_dir}/SOURCES/${OS_RPM_NAME}-${OS_RPM_VERSION}.tar.gz" \
		--owner=0 --group=0 \
		--exclude=_output --exclude=.git --transform "s|^|${OS_RPM_NAME}-${OS_RPM_VERSION}/|rSH" \
		.

	rpmbuild -b${srpm} "${OS_RPM_SPECFILE}" \
		--define "skip_dist 1" \
		--define "make_redistributable ${make_redistributable}" \
		--define "version ${OS_RPM_VERSION}" --define "release ${OS_RPM_RELEASE}" \
		--define "commit ${OS_GIT_COMMIT}" \
		--define "os_git_vars ${OS_RPM_GIT_VARS}" \
		--define 'dist .el7' --define "_topdir ${rpm_tmp_dir}"

	output_directory="$( find "${rpm_tmp_dir}" -type d -path "*/BUILD/${OS_RPM_NAME}-${OS_RPM_VERSION}/_output/local" )"
	if [[ -z "${output_directory}" ]]; then
		os::log::fatal 'No _output artifact directory found in rpmbuild artifacts!'
	fi

	# migrate the rpm artifacts to the output directory, must be clean or move will fail
	make clean
	mkdir -p "${OS_OUTPUT}"

	# mv exits prematurely with status 1 in the following scenario: running as root,
	# attempting to move a [directory tree containing a] symlink to a destination on
	# an NFS volume exported with root_squash set. This can occur when running this
	# script on a Vagrant box. The error shown is "mv: failed to preserve ownership
	# for $FILE: Operation not permitted". As a workaround, if
	# ${output_directory} and ${OS_OUTPUT} are on different devices, use cp and
	# rm instead.
	if [[ $(stat -c %d "${output_directory}") == $(stat -c %d "${OS_OUTPUT}") ]]; then
		mv "${output_directory}"/* "${OS_OUTPUT}"
	else
		cp -R "${output_directory}"/* "${OS_OUTPUT}"
		rm -rf "${output_directory}"/*
	fi

	mkdir -p "${OS_OUTPUT_RPMPATH}"
	if [[ -n "${OS_BUILD_SRPM-}" ]]; then
		mv -f "${rpm_tmp_dir}"/SRPMS/*src.rpm "${OS_OUTPUT_RPMPATH}"
	fi
	mv -f "${rpm_tmp_dir}"/RPMS/*/*.rpm "${OS_OUTPUT_RPMPATH}"
fi

mkdir -p "${OS_OUTPUT_RELEASEPATH}"
echo "${OS_GIT_COMMIT}" > "${OS_OUTPUT_RELEASEPATH}/.commit"

repo_path="$( os::util::absolute_path "${OS_OUTPUT_RPMPATH}" )"
createrepo "${repo_path}"

echo "[${OS_RPM_NAME}-local-release]
baseurl = file://${repo_path}
gpgcheck = 0
name = Release from Local Source for ${OS_RPM_NAME}
enabled = 1
" > "${repo_path}/local-release.repo"

os::log::info "Repository file for \`yum\` or \`dnf\` placed at ${repo_path}/local-release.repo
Install it with:
$ mv '${repo_path}/local-release.repo' '/etc/yum.repos.d'"
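A sketch of consuming the generated repository on the build host. The paths assume the default _output layout, and 'origin' stands in for whatever ${OS_RPM_NAME} resolved to; adjust both for your checkout:

  sudo cp _output/local/releases/rpms/local-release.repo /etc/yum.repos.d/
  dnf --disablerepo='*' --enablerepo='origin-local-release' list available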
hack/cherry-pick.sh (Executable file, 88 lines)
@@ -0,0 +1,88 @@
#!/bin/bash

# See HACKING.md for usage
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"

repo="${UPSTREAM_REPO:-k8s.io/kubernetes}"
package="${UPSTREAM_PACKAGE:-pkg/api}"
UPSTREAM_REPO_LOCATION="${UPSTREAM_REPO_LOCATION:-../../../${repo}}"
pr="$1"

if [[ "$#" -ne 1 ]]; then
	echo "You must supply a pull request by number or a Git range in the upstream ${repo} project" 1>&2
	exit 1
fi
os::build::require_clean_tree # Origin tree must be clean

patch="${TMPDIR:-/tmp}/patch"
rm -rf "${patch}"
mkdir -p "${patch}"
patch="${patch}/cherry-pick"

if [[ ! -d "${UPSTREAM_REPO_LOCATION}" ]]; then
	echo "Expected ${UPSTREAM_REPO_LOCATION} to exist" 1>&2
	exit 1
fi

lastrev="${NO_REBASE-}"
if [[ -z "${NO_REBASE-}" ]]; then
	lastrev="$(go run ${OS_ROOT}/tools/godepversion/godepversion.go ${OS_ROOT}/Godeps/Godeps.json ${repo}/${package})"
fi

pushd "${UPSTREAM_REPO_LOCATION}" > /dev/null
os::build::require_clean_tree

remote="${UPSTREAM_REMOTE:-origin}"
git fetch ${remote}

selector="$(os::build::commit_range $pr ${remote}/master)"

if [[ -z "${NO_REBASE-}" ]]; then
	echo "++ Generating patch for ${selector} onto ${lastrev} ..." 1>&2
	if git rev-parse last_upstream_branch > /dev/null 2>&1; then
		git branch -d last_upstream_branch
	fi
	git checkout -b last_upstream_branch "${lastrev}"
	git diff -p --raw --binary "${selector}" > "${patch}"
	if ! git apply -3 "${patch}"; then
		git rerere # record pre state
		echo 1>&2
		echo "++ Merge conflicts when generating patch, please resolve conflicts and then press ENTER to continue" 1>&2
		read
	fi
	git rerere # record post state
	# stage any new files
	git add . > /dev/null
	# construct a new patch
	git diff --cached -p --raw --binary --{src,dst}-prefix=a/vendor/${repo}/ > "${patch}"
	# cleanup the current state
	git reset HEAD --hard > /dev/null
	git checkout master > /dev/null
	git branch -D last_upstream_branch > /dev/null
else
	echo "++ Generating patch for ${selector} without rebasing ..." 1>&2
	git diff -p --raw --binary --{src,dst}-prefix=a/vendor/${repo}/ "${selector}" > "${patch}"
fi

popd > /dev/null

echo "++ Applying patch ..." 1>&2
echo 1>&2
set +e
git apply --reject "${patch}"
if [[ $? -ne 0 ]]; then
	echo "++ Not all patches applied, merge *.rej into your files or rerun without NO_REBASE set"
	exit 1
fi

commit_message="UPSTREAM: $pr: Cherry-picked"
if [ "$repo" != "k8s.io/kubernetes" ]; then
	commit_message="UPSTREAM: $repo: $pr: Cherry-picked"
fi

set -o errexit
git add .
git commit -m "$commit_message" > /dev/null
git commit --amend
echo 1>&2
echo "++ Done" 1>&2
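Hypothetical invocations (the PR numbers and the alternate upstream are illustrative):

  # Pick upstream PR 12345 from the default k8s.io/kubernetes checkout at ../../../k8s.io/kubernetes
  hack/cherry-pick.sh 12345

  # Pick from a different upstream repository and package
  UPSTREAM_REPO=github.com/docker/distribution UPSTREAM_PACKAGE=registry hack/cherry-pick.sh 678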
hack/deps (Executable file, 29 lines)
@@ -0,0 +1,29 @@
#!/bin/bash

set -o errexit
set -o nounset
set -o pipefail

source "$(dirname "${BASH_SOURCE}")/lib/init.sh"

if [[ $# -eq 0 || ! -f "${OS_OUTPUT}/deps" ]]; then
	echo "Generating dependency graph ..." 1>&2
	mkdir -p "${OS_OUTPUT}"
	os::util::list_go_deps > "${OS_OUTPUT}/deps"
fi

if [[ $# -eq 0 ]]; then
	echo "Dependencies generated to ${OS_OUTPUT}/deps"
	echo
	echo "Install digraph with: go get -u golang.org/x/tools/cmd/digraph"
	echo
	echo "To see the list of all dependencies of a package: "
	echo "  hack/deps forward ${OS_GO_PACKAGE}/cmd/openshift"
	echo
	echo "To see how a package was included into a binary (one particular way): "
	echo "  hack/deps somepath ${OS_GO_PACKAGE}/cmd/openshift FULL_PACKAGE_NAME"
	exit 0
fi

os::util::ensure::system_binary_exists 'digraph'
cat "${OS_OUTPUT}/deps" | digraph "$@"
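A short workflow sketch; the package path is illustrative, and the digraph verbs (forward, somepath) come from golang.org/x/tools/cmd/digraph:

  # Drop the cached graph to force regeneration, then list everything a binary pulls in
  rm -f _output/deps
  hack/deps forward github.com/openshift/image-registry/cmd/dockerregistry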
hack/env (Executable file, 42 lines)
@@ -0,0 +1,42 @@
#!/bin/bash

# This starts a Docker container using the release image (openshift/origin-release:golang-1.6)
# and syncs the local directory into that image. The default mode performs a 'git archive' of
# the current HEAD, so you get a reproducible environment. You can also set
# OS_BUILD_ENV_REUSE_VOLUME to a docker volume name to rsync (or docker cp) the contents of
# the current directory into the image.
#
# Examples:
#   # sync local dir into the volume and print the Docker create command
#   $ hack/env
#
#   # builds the current HEAD in the container
#   $ hack/env make
#
#   # builds the current HEAD and copy _output/releases back locally afterwards
#   $ OS_BUILD_ENV_PRESERVE=_output/releases hack/env make release
#
#   # run all update tasks and copy the api, pkg, and docs directories back out
#   $ OS_BUILD_ENV_PRESERVE=api:docs:pkg hack/env make update
#
#   # rsync the contents of the current directory into the 'local' docker volume
#   # and iteratively build
#   $ export OS_BUILD_ENV_REUSE_VOLUME=local
#   $ export OS_BUILD_ENV_DOCKER_ARGS='-e OS_VERSION_FILE= '
#   $ hack/env make  # slow
#   $ hack/env make  # fast!
#
#   # force a new volume to get created from the current source
#   $ OS_BUILD_ENV_VOLUME_FORCE_NEW=TRUE hack/env
#

# NOTE: only committed code is built.
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"

relative_bin_path="$( os::util::repository_relative_path "${OS_OUTPUT_BINPATH}" )"
relative_release_path="$( os::util::repository_relative_path "${OS_OUTPUT_RELEASEPATH}" )"
relative_script_path="$( os::util::repository_relative_path "${OS_OUTPUT_SCRIPTPATH}" )"
default_preserve_paths="${relative_bin_path}:${relative_release_path}:${relative_script_path}"
export OS_BUILD_ENV_PRESERVE="${OS_BUILD_ENV_PRESERVE:-"${default_preserve_paths}"}"

os::build::environment::run "$@"
hack/import-restrictions.json (Normal file, 2 lines)
@@ -0,0 +1,2 @@
[
]
hack/lib/OWNERS (Normal file, 2 lines)
@@ -0,0 +1,2 @@
reviewers:
approvers:
hack/lib/build/archive.sh (Normal file, 101 lines)
@@ -0,0 +1,101 @@
#!/bin/bash

# This library holds utility functions for archiving
# built binaries and releases.

function os::build::archive::name() {
	echo "${OS_RELEASE_ARCHIVE}-${OS_GIT_VERSION}-$1" | tr '+' '-'
}
readonly -f os::build::archive::name

function os::build::archive::zip() {
	local default_name
	default_name="$( os::build::archive::name "${platform}" ).zip"
	local archive_name="${archive_name:-$default_name}"
	echo "++ Creating ${archive_name}"
	for file in "$@"; do
		pushd "${release_binpath}" &> /dev/null
		sha256sum "${file}"
		popd &>/dev/null
		zip "${OS_OUTPUT_RELEASEPATH}/${archive_name}" -qj "${release_binpath}/${file}"
	done
}
readonly -f os::build::archive::zip

function os::build::archive::tar() {
	local base_name
	base_name="$( os::build::archive::name "${platform}" )"
	local default_name="${base_name}.tar.gz"
	local archive_name="${archive_name:-$default_name}"
	echo "++ Creating ${archive_name}"
	pushd "${release_binpath}" &> /dev/null
	find . -type f -exec sha256sum {} \;
	if [[ -n "$(which bsdtar)" ]]; then
		bsdtar -czf "${OS_OUTPUT_RELEASEPATH}/${archive_name}" -s ",^\.,${base_name}," $@
	else
		tar -czf "${OS_OUTPUT_RELEASEPATH}/${archive_name}" --xattrs-exclude='LIBARCHIVE.xattr.security.selinux' --transform="s,^\.,${base_name}," $@
	fi
	popd &>/dev/null
}
readonly -f os::build::archive::tar

# Checks if the filesystem on a partition that the provided path points to is
# supporting hard links.
#
# Input:
#   $1 - the path where the hardlinks support test will be done.
# Returns:
#   0 - if hardlinks are supported
#   non-zero - if hardlinks aren't supported
function os::build::archive::internal::is_hardlink_supported() {
	local path="$1"
	# Determine if FS supports hard links
	local temp_file=$(TMPDIR="${path}" mktemp)
	ln "${temp_file}" "${temp_file}.link" &> /dev/null && unlink "${temp_file}.link" || local supported=$?
	rm -f "${temp_file}"
	return ${supported:-0}
}
readonly -f os::build::archive::internal::is_hardlink_supported

# Extract a tar.gz compressed archive in a given directory. If the
# archive contains hardlinks and the underlying filesystem does not
# support hardlinks then a hard dereference will be done.
#
# Input:
#   $1 - path to archive file
#   $2 - directory where the archive will be extracted
function os::build::archive::extract_tar() {
	local archive_file="$1"
	local change_dir="$2"

	if [[ -z "${archive_file}" ]]; then
		return 0
	fi

	local tar_flags="--strip-components=1"

	# Unpack archive
	echo "++ Extracting $(basename ${archive_file})"
	if [[ "${archive_file}" == *.zip ]]; then
		unzip -o "${archive_file}" -d "${change_dir}"
		return 0
	fi
	if os::build::archive::internal::is_hardlink_supported "${change_dir}" ; then
		# Ensure that tar won't try to set an owner when extracting to an
		# nfs mount. Setting ownership on an nfs mount is likely to fail
		# even for root.
		local mount_type=$(df -P -T "${change_dir}" | tail -n +2 | awk '{print $2}')
		if [[ "${mount_type}" = "nfs" ]]; then
			tar_flags="${tar_flags} --no-same-owner"
		fi
		tar mxzf "${archive_file}" ${tar_flags} -C "${change_dir}"
	else
		local temp_dir=$(TMPDIR=/dev/shm/ mktemp -d)
		tar mxzf "${archive_file}" ${tar_flags} -C "${temp_dir}"
		pushd "${temp_dir}" &> /dev/null
		tar cO --hard-dereference * | tar xf - -C "${change_dir}"
		popd &>/dev/null
		rm -rf "${temp_dir}"
	fi
}
readonly -f os::build::archive::extract_tar
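After sourcing hack/lib/init.sh, the extraction helper can be used directly; the archive path here is illustrative. The hardlink probe runs first, so callers transparently get the hard-dereference fallback on filesystems without hardlink support:

  os::build::archive::extract_tar "_output/local/releases/release.tar.gz" "/tmp/unpacked"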
hack/lib/build/binaries.sh (Normal file, 414 lines)
@@ -0,0 +1,414 @@
#!/bin/bash

# This library holds utility functions for building
# and placing Golang binaries for multiple arches.

# os::build::binaries_from_targets takes a list of build targets and returns the
# full go package to be built
function os::build::binaries_from_targets() {
	local target
	for target; do
		if [[ -z "${target}" ]]; then
			continue
		fi
		echo "${OS_GO_PACKAGE}/${target}"
	done
}
readonly -f os::build::binaries_from_targets

# Asks golang what it thinks the host platform is. The go tool chain does some
# slightly different things when the target platform matches the host platform.
function os::build::host_platform() {
	echo "$(go env GOHOSTOS)/$(go env GOHOSTARCH)"
}
readonly -f os::build::host_platform

# Create a user friendly version of host_platform for end users
function os::build::host_platform_friendly() {
	local platform=${1:-}
	if [[ -z "${platform}" ]]; then
		platform=$(os::build::host_platform)
	fi
	if [[ $platform == "windows/amd64" ]]; then
		echo "windows"
	elif [[ $platform == "darwin/amd64" ]]; then
		echo "mac"
	elif [[ $platform == "linux/386" ]]; then
		echo "linux-32bit"
	elif [[ $platform == "linux/amd64" ]]; then
		echo "linux-64bit"
	elif [[ $platform == "linux/ppc64le" ]]; then
		echo "linux-powerpc64"
	elif [[ $platform == "linux/arm64" ]]; then
		echo "linux-arm64"
	elif [[ $platform == "linux/s390x" ]]; then
		echo "linux-s390"
	else
		echo "$(go env GOHOSTOS)-$(go env GOHOSTARCH)"
	fi
}
readonly -f os::build::host_platform_friendly

# This converts from platform/arch to PLATFORM_ARCH; the host platform is
# used if no parameter is passed.
function os::build::platform_arch() {
	local platform=${1:-}
	if [[ -z "${platform}" ]]; then
		platform=$(os::build::host_platform)
	fi

	echo "${platform}" | tr '[:lower:]/' '[:upper:]_'
}
readonly -f os::build::platform_arch

# os::build::setup_env will check that the `go` command is available in
# ${PATH}. If not running on Travis, it will also check that the Go version is
# good enough for the Kubernetes build.
#
# Output Vars:
#   export GOPATH - A modified GOPATH to our created tree along with extra
#     stuff.
#   export GOBIN - This is actively unset if already set as we want binaries
#     placed in a predictable place.
function os::build::setup_env() {
	os::util::ensure::system_binary_exists 'go'

	if [[ -z "$(which sha256sum)" ]]; then
		sha256sum() {
			return 0
		}
	fi

	# Travis continuous build uses a head go release that doesn't report
	# a version number, so we skip this check on Travis. It's unnecessary
	# there anyway.
	if [[ "${TRAVIS:-}" != "true" ]]; then
		local go_version
		go_version=($(go version))
		if [[ "${go_version[2]}" < "${OS_REQUIRED_GO_VERSION}" ]]; then
			os::log::fatal "Detected Go version: ${go_version[*]}.
Builds require Go version ${OS_REQUIRED_GO_VERSION} or greater."
		fi
	fi
	# For any tools that expect this to be set (it is default in golang 1.6),
	# force vendor experiment.
	export GO15VENDOREXPERIMENT=1

	unset GOBIN

	# default to OS_OUTPUT_GOPATH if no GOPATH set
	if [[ -z "${GOPATH:-}" ]]; then
		export OS_OUTPUT_GOPATH=1
	fi

	# use the regular gopath for building
	if [[ -z "${OS_OUTPUT_GOPATH:-}" ]]; then
		export OS_TARGET_BIN=${GOPATH}/bin
		return
	fi

	# create a local GOPATH in _output
	GOPATH="${OS_OUTPUT}/go"
	OS_TARGET_BIN=${GOPATH}/bin
	local go_pkg_dir="${GOPATH}/src/${OS_GO_PACKAGE}"
	local go_pkg_basedir=$(dirname "${go_pkg_dir}")

	mkdir -p "${go_pkg_basedir}"
	rm -f "${go_pkg_dir}"

	# TODO: This symlink should be relative.
	ln -s "${OS_ROOT}" "${go_pkg_dir}"

	# lots of tools "just don't work" unless we're in the GOPATH
	cd "${go_pkg_dir}"

	# Append OS_EXTRA_GOPATH to the GOPATH if it is defined.
	if [[ -n ${OS_EXTRA_GOPATH:-} ]]; then
		GOPATH="${GOPATH}:${OS_EXTRA_GOPATH}"
		# TODO: needs to handle multiple directories
		OS_TARGET_BIN=${OS_EXTRA_GOPATH}/bin
	fi
	export GOPATH
	export OS_TARGET_BIN
}
readonly -f os::build::setup_env

# Build static binary targets.
#
# Input:
#   $@ - targets and go flags. If no targets are set then all binaries targets
#     are built.
#   OS_BUILD_PLATFORMS - Incoming variable of targets to build for. If unset
#     then just the host architecture is built.
function os::build::build_static_binaries() {
	CGO_ENABLED=0 os::build::build_binaries -installsuffix=cgo "$@"
}
readonly -f os::build::build_static_binaries

# Build binary targets specified
#
# Input:
#   $@ - targets and go flags. If no targets are set then all binaries targets
#     are built.
#   OS_BUILD_PLATFORMS - Incoming variable of targets to build for. If unset
#     then just the host architecture is built.
function os::build::build_binaries() {
	if [[ $# -eq 0 ]]; then
		return
	fi
	local -a binaries=( "$@" )
	# Create a sub-shell so that we don't pollute the outer environment
	( os::build::internal::build_binaries "${binaries[@]+"${binaries[@]}"}" )
}

# Build binary targets specified. Should always be run in a sub-shell so we don't leak GOBIN
#
# Input:
#   $@ - targets and go flags. If no targets are set then all binaries targets
#     are built.
#   OS_BUILD_PLATFORMS - Incoming variable of targets to build for. If unset
#     then just the host architecture is built.
os::build::internal::build_binaries() {
	# Check for `go` binary and set ${GOPATH}.
	os::build::setup_env

	# Fetch the version.
	local version_ldflags
	version_ldflags=$(os::build::ldflags)

	local goflags
	# Use eval to preserve embedded quoted strings.
	eval "goflags=(${OS_GOFLAGS:-})"

	local arg
	for arg; do
		if [[ "${arg}" == -* ]]; then
			# Assume arguments starting with a dash are flags to pass to go.
			goflags+=("${arg}")
		fi
	done

	os::build::export_targets "$@"

	if [[ ! "${targets[@]:+${targets[@]}}" || ! "${binaries[@]:+${binaries[@]}}" ]]; then
		return 0
	fi

	local -a nonstatics=()
	local -a tests=()
	for binary in "${binaries[@]-}"; do
		if [[ "${binary}" =~ ".test"$ ]]; then
			tests+=($binary)
		else
			nonstatics+=($binary)
		fi
	done

	local pkgdir="${OS_OUTPUT_PKGDIR}"
	if [[ "${CGO_ENABLED-}" == "0" ]]; then
		pkgdir+="/static"
	fi

	local host_platform=$(os::build::host_platform)
	local platform
	for platform in "${platforms[@]+"${platforms[@]}"}"; do
		echo "++ Building go targets for ${platform}:" "${targets[@]}"
		mkdir -p "${OS_OUTPUT_BINPATH}/${platform}"

		# output directly to the desired location
		if [[ $platform == $host_platform ]]; then
			export GOBIN="${OS_OUTPUT_BINPATH}/${platform}"
		else
			unset GOBIN
		fi

		local platform_gotags_envvar=OS_GOFLAGS_TAGS_$(os::build::platform_arch ${platform})
		local platform_gotags_test_envvar=OS_GOFLAGS_TAGS_TEST_$(os::build::platform_arch ${platform})

		# work around https://github.com/golang/go/issues/11887
		local local_ldflags="${version_ldflags}"
		if [[ "${platform}" == "darwin/amd64" ]]; then
			local_ldflags+=" -s"
		fi

		# Add Windows File Properties/Version Info and Icon Resource for oc.exe
		if [[ "$platform" == "windows/amd64" ]]; then
			os::build::generate_windows_versioninfo
		fi

		if [[ ${#nonstatics[@]} -gt 0 ]]; then
			GOOS=${platform%/*} GOARCH=${platform##*/} go install \
				-pkgdir "${pkgdir}/${platform}" \
				-tags "${OS_GOFLAGS_TAGS-} ${!platform_gotags_envvar:-}" \
				-ldflags="${local_ldflags}" \
				"${goflags[@]:+${goflags[@]}}" \
				"${nonstatics[@]}"

			# GOBIN is not supported on cross-compile in Go 1.5+ - move to the correct target
			if [[ $platform != $host_platform ]]; then
				local platform_src="/${platform//\//_}"
				mv "${OS_TARGET_BIN}/${platform_src}/"* "${OS_OUTPUT_BINPATH}/${platform}/"
			fi
		fi

		if [[ "$platform" == "windows/amd64" ]]; then
			os::build::clean_windows_versioninfo
		fi

		for test in "${tests[@]:+${tests[@]}}"; do
			local outfile="${OS_OUTPUT_BINPATH}/${platform}/$(basename ${test})"
			# disabling cgo allows use of delve
			CGO_ENABLED="${OS_TEST_CGO_ENABLED:-}" GOOS=${platform%/*} GOARCH=${platform##*/} go test \
				-pkgdir "${pkgdir}/${platform}" \
				-tags "${OS_GOFLAGS_TAGS-} ${!platform_gotags_test_envvar:-}" \
				-ldflags "${local_ldflags}" \
				-i -c -o "${outfile}" \
				"${goflags[@]:+${goflags[@]}}" \
				"$(dirname ${test})"
		done
	done
}
readonly -f os::build::build_binaries

# Generates the set of target packages, binaries, and platforms to build for.
# Accepts binaries via $@, and platforms via OS_BUILD_PLATFORMS, or defaults to
# the current platform.
function os::build::export_targets() {
	platforms=("${OS_BUILD_PLATFORMS[@]:+${OS_BUILD_PLATFORMS[@]}}")

	targets=()
	local arg
	for arg; do
		if [[ "${arg}" != -* ]]; then
			targets+=("${arg}")
		fi
	done

	binaries=($(os::build::binaries_from_targets "${targets[@]-}"))
}
readonly -f os::build::export_targets

# This will take $@ from $GOPATH/bin and copy them to the appropriate
# place in ${OS_OUTPUT_BINDIR}
#
# If OS_RELEASE_ARCHIVE is set, tar archives prefixed with OS_RELEASE_ARCHIVE for
# each of OS_BUILD_PLATFORMS are created.
#
# Ideally this wouldn't be necessary and we could just set GOBIN to
# OS_OUTPUT_BINDIR but that won't work in the face of cross compilation. 'go
# install' will place binaries that match the host platform directly in $GOBIN
# while placing cross compiled binaries into `platform_arch` subdirs. This
# complicates pretty much everything else we do around packaging and such.
function os::build::place_bins() {
	(
		local host_platform
		host_platform=$(os::build::host_platform)

		if [[ "${OS_RELEASE_ARCHIVE-}" != "" ]]; then
			os::build::version::get_vars
			mkdir -p "${OS_OUTPUT_RELEASEPATH}"
		fi

		os::build::export_targets "$@"
		for platform in "${platforms[@]+"${platforms[@]}"}"; do
			# The substitution on platform_src below will replace all slashes with
			# underscores. It'll transform darwin/amd64 -> darwin_amd64.
			local platform_src="/${platform//\//_}"

			# Skip this directory if the platform has no binaries.
			if [[ ! -d "${OS_OUTPUT_BINPATH}/${platform}" ]]; then
				continue
			fi

			# Create an array of binaries to release. Append .exe variants if the platform is windows.
			local -a binaries=()
			for binary in "${targets[@]}"; do
				binary=$(basename $binary)
				if [[ $platform == "windows/amd64" ]]; then
					binaries+=("${binary}.exe")
				else
					binaries+=("${binary}")
				fi
			done
		done
	)
}
readonly -f os::build::place_bins

# os::build::ldflag() abstracts ldflag inconsistency across Go versions.
# TODO: remove
function os::build::ldflag() {
	local key=${1}
	local val=${2}

	echo "-X ${key}=${val}"
}
readonly -f os::build::ldflag

# os::build::require_clean_tree exits if the current Git tree is not clean.
function os::build::require_clean_tree() {
	if ! git diff-index --quiet HEAD -- || test $(git ls-files --exclude-standard --others | wc -l) != 0; then
		echo "You can't have any staged or dirty files in $(pwd) for this command."
		echo "Either commit them or unstage them to continue."
		exit 1
	fi
}
readonly -f os::build::require_clean_tree

# os::build::commit_range takes one or two arguments - if the first argument is an
# integer, it is assumed to be a pull request and the local origin/pr/# branch is
# used to determine the common range with the second argument. If the first argument
# is not an integer, it is assumed to be a Git commit range and output directly.
function os::build::commit_range() {
	local remote
	remote="${UPSTREAM_REMOTE:-origin}"
	if [[ "$1" =~ ^-?[0-9]+$ ]]; then
		local target
		target="$(git rev-parse ${remote}/pr/$1)"
		if [[ $? -ne 0 ]]; then
			echo "Branch does not exist, or you have not configured ${remote}/pr/* style branches from GitHub" 1>&2
			exit 1
		fi

		local base
		base="$(git merge-base ${target} $2)"
		if [[ $? -ne 0 ]]; then
			echo "Branch has no common commits with $2" 1>&2
			exit 1
		fi
		if [[ "${base}" == "${target}" ]]; then

			# DO NOT TRUST THIS CODE
			merged="$(git rev-list --reverse ${target}..$2 --ancestry-path | head -1)"
			if [[ -z "${merged}" ]]; then
				echo "Unable to find the commit that merged ${remote}/pr/$1" 1>&2
				exit 1
			fi
			#if [[ $? -ne 0 ]]; then
			#	echo "Unable to find the merge commit for $1: ${merged}" 1>&2
			#	exit 1
			#fi
			echo "++ pr/$1 appears to have merged at ${merged}" 1>&2
			leftparent="$(git rev-list --parents -n 1 ${merged} | cut -f2 -d ' ')"
			if [[ $? -ne 0 ]]; then
				echo "Unable to find the left-parent for the merge of $1" 1>&2
				exit 1
			fi
			base="$(git merge-base ${target} ${leftparent})"
			if [[ $? -ne 0 ]]; then
				echo "Unable to find the common commit between ${leftparent} and $1" 1>&2
				exit 1
			fi
			echo "${base}..${target}"
			exit 0
			#echo "Branch has already been merged to upstream master, use explicit range instead" 1>&2
			#exit 1
		fi

		echo "${base}...${target}"
		exit 0
	fi

	echo "$1"
}
readonly -f os::build::commit_range
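Note how build_binaries composes per-platform build tags: os::build::platform_arch upper-cases os/arch into OS_ARCH, and the result feeds the indirect lookups ${!platform_gotags_envvar:-} above. A sketch, with an illustrative tag and target:

  os::build::platform_arch linux/amd64   # prints LINUX_AMD64

  # build_binaries picks this up via the indirect expansion shown above
  export OS_GOFLAGS_TAGS_LINUX_AMD64='include_gcs'
  OS_BUILD_PLATFORMS=( linux/amd64 ) os::build::build_binaries cmd/dockerregistry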
hack/lib/build/environment.sh (Normal file, 281 lines)
@@ -0,0 +1,281 @@
#!/bin/bash

# This script holds library functions for setting up the Docker container build environment

# os::build::environment::create creates a docker container with the default variables.
# arguments are passed directly to the container, OS_BUILD_ENV_GOLANG, OS_BUILD_ENV_IMAGE,
# and OS_RELEASE_DOCKER_ARGS can be used to customize the container. The docker socket
# is mounted by default and the output of the command is the container id.
function os::build::environment::create() {
	set -o errexit
	local release_image="${OS_BUILD_ENV_IMAGE}"
	local additional_context="${OS_BUILD_ENV_DOCKER_ARGS:-}"

	local workingdir
	workingdir=$( os::build::environment::release::workingdir )
	additional_context+=" -w ${workingdir}"

	if [[ "${OS_BUILD_ENV_USE_DOCKER:-y}" == "y" ]]; then
		additional_context+=" --privileged -v /var/run/docker.sock:/var/run/docker.sock"

		if [[ "${OS_BUILD_ENV_LOCAL_DOCKER:-n}" == "y" ]]; then
			# if OS_BUILD_ENV_LOCAL_DOCKER==y, add the local OS_ROOT as the bind mount to the working dir
			# and set the running user to the current user
			additional_context+=" -v ${OS_ROOT}:${workingdir} -u $(id -u)"
		elif [[ -n "${OS_BUILD_ENV_VOLUME:-}" ]]; then
			if docker volume inspect "${OS_BUILD_ENV_VOLUME}" >/dev/null 2>&1; then
				os::log::debug "Re-using volume ${OS_BUILD_ENV_VOLUME}"
			else
				# if OS_BUILD_ENV_VOLUME is set and no volume already exists, create a docker volume to
				# store the working output so successive iterations can reuse shared code.
				os::log::debug "Creating volume ${OS_BUILD_ENV_VOLUME}"
				docker volume create --name "${OS_BUILD_ENV_VOLUME}" > /dev/null
			fi

			if [[ -n "${OS_BUILD_ENV_TMP_VOLUME:-}" ]]; then
				if docker volume inspect "${OS_BUILD_ENV_TMP_VOLUME}" >/dev/null 2>&1; then
					os::log::debug "Re-using volume ${OS_BUILD_ENV_TMP_VOLUME}"
				else
					# if OS_BUILD_ENV_TMP_VOLUME is set and no volume already exists, create a docker
					# volume for /tmp so successive iterations can reuse shared state.
					os::log::debug "Creating volume ${OS_BUILD_ENV_TMP_VOLUME}"
					docker volume create --name "${OS_BUILD_ENV_TMP_VOLUME}" >/dev/null
				fi
				additional_context+=" -v ${OS_BUILD_ENV_TMP_VOLUME}:/tmp"
			fi
			additional_context+=" -v ${OS_BUILD_ENV_VOLUME}:${workingdir}"
		fi
	fi

	if [[ -n "${OS_BUILD_ENV_FROM_ARCHIVE-}" ]]; then
		additional_context+=" -e OS_VERSION_FILE=/tmp/os-version-defs"
	else
		additional_context+=" -e OS_VERSION_FILE="
	fi

	declare -a cmd=( )
	declare -a env=( )
	local prefix=1
	for arg in "${@:1}"; do
		if [[ "${arg}" != *"="* ]]; then
			prefix=0
		fi
		if [[ "${prefix}" -eq 1 ]]; then
			env+=( "-e" "${arg}" )
		else
			cmd+=( "${arg}" )
		fi
	done
	if [[ -t 0 ]]; then
		if [[ "${#cmd[@]}" -eq 0 ]]; then
			cmd=( "/bin/sh" )
		fi
		if [[ "${cmd[0]}" == "/bin/sh" || "${cmd[0]}" == "/bin/bash" ]]; then
			additional_context+=" -it"
		else
			# container exit races with log collection so we
			# need to sleep at the end but preserve the exit
			# code of whatever the user asked for us to run
			cmd=( '/bin/bash' '-c' "${cmd[*]}; return_code=\$?; sleep 1; exit \${return_code}" )
		fi
	fi

	# Create a new container from the release environment
	os::log::debug "Creating container: \`docker create ${additional_context} ${env[@]+"${env[@]}"} ${release_image} ${cmd[@]+"${cmd[@]}"}\`"
	docker create ${additional_context} "${env[@]+"${env[@]}"}" "${release_image}" "${cmd[@]+"${cmd[@]}"}"
}
readonly -f os::build::environment::create

# os::build::environment::release::workingdir calculates the working directory for the current
# release image.
function os::build::environment::release::workingdir() {
	if [[ -n "${OS_BUILD_ENV_WORKINGDIR-}" ]]; then
		echo "${OS_BUILD_ENV_WORKINGDIR}"
		return 0
	fi
	set -o errexit
	# get working directory
	local container
	container="$(docker create "${release_image}")"
	local workingdir
	workingdir="$(docker inspect -f '{{ index . "Config" "WorkingDir" }}' "${container}")"
	docker rm "${container}" > /dev/null
	echo "${workingdir}"
}
readonly -f os::build::environment::release::workingdir

# os::build::environment::cleanup stops and removes the container named in the argument
# (unless OS_BUILD_ENV_LEAVE_CONTAINER is set, in which case it will only stop the container).
function os::build::environment::cleanup() {
	local container=$1
	local volume=$2
	local tmp_volume=$3
	os::log::debug "Stopping container ${container}"
	docker stop --time=0 "${container}" > /dev/null || true
	if [[ -z "${OS_BUILD_ENV_LEAVE_CONTAINER:-}" ]]; then
		os::log::debug "Removing container ${container}"
		docker rm "${container}" > /dev/null

		if [[ -z "${OS_BUILD_ENV_REUSE_TMP_VOLUME:-}" ]]; then
			os::log::debug "Removing tmp build volume"
			os::build::environment::remove_volume "${tmp_volume}"
		fi
		if [[ -n "${OS_BUILD_ENV_CLEAN_BUILD_VOLUME:-}" ]]; then
			os::log::debug "Removing build volume"
			os::build::environment::remove_volume "${volume}"
		fi
	fi
}
readonly -f os::build::environment::cleanup

# os::build::environment::start starts the container provided as the first argument
# using whatever content exists in the container already.
function os::build::environment::start() {
	local container=$1

	os::log::debug "Starting container ${container}"
	if [[ "$( docker inspect --type container -f '{{ .Config.OpenStdin }}' "${container}" )" == "true" ]]; then
		docker start -ia "${container}"
	else
		docker start "${container}" > /dev/null
		os::log::debug "Following container logs"
		docker logs -f "${container}"
	fi

	local exitcode
	exitcode="$( docker inspect --type container -f '{{ .State.ExitCode }}' "${container}" )"

	os::log::debug "Container exited with ${exitcode}"

	# extract content from the image
	if [[ -n "${OS_BUILD_ENV_PRESERVE-}" ]]; then
		local workingdir
		workingdir="$(docker inspect -f '{{ index . "Config" "WorkingDir" }}' "${container}")"
		local oldIFS="${IFS}"
		IFS=:
		for path in ${OS_BUILD_ENV_PRESERVE}; do
			local parent=.
			if [[ "${path}" != "." ]]; then
				parent="$( dirname "${path}" )"
				mkdir -p "${parent}"
			fi
			os::log::debug "Copying from ${container}:${workingdir}/${path} to ${parent}"
			if ! output="$( docker cp "${container}:${workingdir}/${path}" "${parent}" 2>&1 )"; then
				os::log::warning "Copying ${path} from the container failed!"
				os::log::warning "${output}"
			fi
		done
		IFS="${oldIFS}"
	fi
	return "${exitcode}"
}
readonly -f os::build::environment::start

# os::build::environment::withsource starts the container provided as the first argument
# after copying in the contents of the current Git repository at HEAD (or the ref given
# in the second argument).
function os::build::environment::withsource() {
	local container=$1
	local commit=${2:-HEAD}

	if [[ -n "${OS_BUILD_ENV_LOCAL_DOCKER-}" ]]; then
		os::build::environment::start "${container}"
		return
	fi

	local workingdir
	workingdir="$(docker inspect -f '{{ index . "Config" "WorkingDir" }}' "${container}")"

	if [[ -n "${OS_BUILD_ENV_FROM_ARCHIVE-}" ]]; then
		# Generate version definitions. Tree state is clean because we are pulling from git directly.
		OS_GIT_TREE_STATE=clean os::build::version::get_vars
		os::build::version::save_vars > "/tmp/os-version-defs"

		os::log::debug "Generating source code archive"
		tar -cf - -C /tmp/ os-version-defs | docker cp - "${container}:/tmp"
		git archive --format=tar "${commit}" | docker cp - "${container}:${workingdir}"
		os::build::environment::start "${container}"
		return
	fi

	local excluded=()
	local oldIFS="${IFS}"
	IFS=:
	for exclude in ${OS_BUILD_ENV_EXCLUDE:-_output}; do
		excluded+=("--exclude=${exclude}")
	done
	IFS="${oldIFS}"
	if which rsync &>/dev/null && [[ -n "${OS_BUILD_ENV_VOLUME-}" ]]; then
		os::log::debug "Syncing source using \`rsync\`"
		if ! rsync -a --blocking-io "${excluded[@]}" --delete --omit-dir-times --numeric-ids -e "docker run --rm -i -v \"${OS_BUILD_ENV_VOLUME}:${workingdir}\" --entrypoint=/bin/bash \"${OS_BUILD_ENV_IMAGE}\" -c '\$@'" . remote:"${workingdir}"; then
			os::log::debug "Falling back to \`tar\` and \`docker cp\` as \`rsync\` is not in container"
			tar -cf - "${excluded[@]}" . | docker cp - "${container}:${workingdir}"
		fi
	else
		os::log::debug "Syncing source using \`tar\` and \`docker cp\`"
		tar -cf - "${excluded[@]}" . | docker cp - "${container}:${workingdir}"
	fi

	os::build::environment::start "${container}"
}
readonly -f os::build::environment::withsource

function os::build::environment::volume_name() {
	local prefix=$1
	local commit=$2
	local volume=$3

	if [[ -z "${volume}" ]]; then
		volume="${prefix}-$( git rev-parse "${commit}" )"
	fi

	echo "${volume}" | tr '[:upper:]' '[:lower:]'
}
readonly -f os::build::environment::volume_name

function os::build::environment::remove_volume() {
	local volume=$1

	if docker volume inspect "${volume}" >/dev/null 2>&1; then
		os::log::debug "Removing volume ${volume}"
		docker volume rm "${volume}" >/dev/null
	fi
}
readonly -f os::build::environment::remove_volume

# os::build::environment::run launches the container with the provided arguments and
# the current commit (defaults to HEAD). The container is automatically cleaned up.
function os::build::environment::run() {
	local commit="${OS_GIT_COMMIT:-HEAD}"
	local volume
	local tmp_volume

	volume="$( os::build::environment::volume_name "origin-build" "${commit}" "${OS_BUILD_ENV_REUSE_VOLUME:-}" )"
	tmp_volume="$( os::build::environment::volume_name "origin-build-tmp" "${commit}" "${OS_BUILD_ENV_REUSE_TMP_VOLUME:-}" )"

	export OS_BUILD_ENV_VOLUME="${volume}"
	export OS_BUILD_ENV_TMP_VOLUME="${tmp_volume}"

	if [[ -n "${OS_BUILD_ENV_VOLUME_FORCE_NEW:-}" ]]; then
		os::build::environment::remove_volume "${volume}"
		os::build::environment::remove_volume "${tmp_volume}"
	fi

	if [[ -n "${OS_BUILD_ENV_PULL_IMAGE:-}" ]]; then
		os::log::info "Pulling the ${OS_BUILD_ENV_IMAGE} image to update it..."
		docker pull "${OS_BUILD_ENV_IMAGE}"
	fi

	os::log::debug "Using commit ${commit}"
	os::log::debug "Using volume ${volume}"
	os::log::debug "Using tmp volume ${tmp_volume}"

	local container
	container="$( os::build::environment::create "$@" )"
	trap "os::build::environment::cleanup ${container} ${volume} ${tmp_volume}" EXIT

	os::log::debug "Using container ${container}"

	os::build::environment::withsource "${container}" "${commit}"
}
readonly -f os::build::environment::run
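os::build::environment::create splits its arguments at the first token that is not KEY=VALUE: leading assignments become docker -e flags, and the remainder becomes the container command. A hedged end-to-end example (the image is whatever OS_BUILD_ENV_IMAGE resolves to):

  # Runs 'make build' inside the build container with GOFLAGS=-v in its environment;
  # the container, and optionally its volumes, are cleaned up by the EXIT trap.
  os::build::environment::run GOFLAGS=-v make build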
hack/lib/build/images.sh (Normal file, 144 lines)
@@ -0,0 +1,144 @@
|
||||
#!/bin/bash
|
||||
|
||||
# This library holds utility functions for building container images.
|
||||
|
||||
# os::build::image builds an image from a directory, to a tag or tags The default
|
||||
# behavior is to use the imagebuilder binary if it is available on the path with
|
||||
# fallback to docker build if it is not available.
|
||||
#
|
||||
# Globals:
|
||||
# - OS_BUILD_IMAGE_ARGS
|
||||
# - OS_BUILD_IMAGE_NUM_RETRIES
|
||||
# Arguments:
|
||||
# - 1: the directory in which to build
|
||||
# - 2: the tag to apply to the image
|
||||
# Returns:
|
||||
# None
|
||||
function os::build::image() {
|
||||
local tag=$1
|
||||
local directory=$2
|
||||
local extra_tag
|
||||
|
||||
if [[ ! "${tag}" == *":"* ]]; then
|
||||
# if no tag was specified in the image name,
|
||||
# tag with :latest and the release commit, if
|
||||
# available, falling back to the last commit
|
||||
# if no release commit is recorded
|
||||
local release_commit
|
||||
release_commit="${OS_RELEASE_COMMIT-}"
|
||||
if [[ -z "${release_commit}" && -f "${OS_OUTPUT_RELEASEPATH}/.commit" ]]; then
|
||||
release_commit="$( cat "${OS_OUTPUT_RELEASEPATH}/.commit" )"
|
||||
fi
|
||||
if [[ -z "${release_commit}" ]]; then
|
||||
release_commit="$( git log -1 --pretty=%h )"
|
||||
fi
|
||||
extra_tag="${tag}:${release_commit}"
|
||||
|
||||
tag="${tag}:latest"
|
||||
fi
|
||||
|
||||
local result=1
|
||||
local image_build_log
|
||||
image_build_log="$( mktemp "${BASETMPDIR}/imagelogs.XXXXX" )"
|
||||
for (( i = 0; i < "${OS_BUILD_IMAGE_NUM_RETRIES:-2}"; i++ )); do
|
||||
if [[ "${i}" -gt 0 ]]; then
|
||||
os::log::internal::prefix_lines "[${tag%:*}]" "$( cat "${image_build_log}" )"
|
||||
os::log::warning "Retrying image build for ${tag}, attempt ${i}..."
|
||||
fi
|
||||
|
||||
if os::build::image::internal::generic "${tag}" "${directory}" "${extra_tag:-}" >"${image_build_log}" 2>&1; then
|
||||
result=0
|
||||
break
|
||||
fi
|
||||
done
|
||||
|
||||
os::log::internal::prefix_lines "[${tag%:*}]" "$( cat "${image_build_log}" )"
|
||||
return "${result}"
|
||||
}
|
||||
readonly -f os::build::image

# os::build::image::internal::generic builds a container image using either imagebuilder
# or docker, defaulting to imagebuilder if present
#
# Globals:
#  - OS_BUILD_IMAGE_ARGS
# Arguments:
#  - 1: the tag to apply to the image
#  - 2: the directory in which to build
#  - 3: optionally, extra tags to add
# Returns:
#  None
function os::build::image::internal::generic() {
    local directory=$2

    local result=1
    if os::util::find::system_binary 'imagebuilder' >/dev/null; then
        if os::build::image::internal::imagebuilder "$@"; then
            result=0
        fi
    else
        os::log::warning "Unable to locate 'imagebuilder' on PATH, falling back to Docker build"
        if os::build::image::internal::docker "$@"; then
            result=0
        fi
    fi

    # ensure the temporary contents are cleaned up
    git clean -fdx "${directory}"
    return "${result}"
}
readonly -f os::build::image::internal::generic

# os::build::image::internal::imagebuilder builds a container image using imagebuilder
#
# Globals:
#  - OS_BUILD_IMAGE_ARGS
# Arguments:
#  - 1: the tag to apply to the image
#  - 2: the directory in which to build
#  - 3: optionally, extra tags to add
# Returns:
#  None
function os::build::image::internal::imagebuilder() {
    local tag=$1
    local directory=$2
    local extra_tag="${3-}"
    local options=()

    if [[ -n "${OS_BUILD_IMAGE_ARGS:-}" ]]; then
        options=( ${OS_BUILD_IMAGE_ARGS} )
    fi

    if [[ -n "${extra_tag}" ]]; then
        options+=( -t "${extra_tag}" )
    fi

    imagebuilder "${options[@]:-}" -t "${tag}" "${directory}"
}
readonly -f os::build::image::internal::imagebuilder
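
# Example (illustrative): OS_BUILD_IMAGE_ARGS is deliberately left unquoted
# above so that it word-splits into separate options. A build environment
# might export something along the lines of:
#
#   OS_BUILD_IMAGE_ARGS='--mount /path/to/secret:/run/secret'
#
# which would become two elements of the options array passed to the builder;
# the exact flags depend on the imagebuilder version in use.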

# os::build::image::internal::docker builds a container image using docker
#
# Globals:
#  - OS_BUILD_IMAGE_ARGS
# Arguments:
#  - 1: the tag to apply to the image
#  - 2: the directory in which to build
#  - 3: optionally, extra tags to add
# Returns:
#  None
function os::build::image::internal::docker() {
    local tag=$1
    local directory=$2
    local extra_tag="${3-}"
    local options=()

    if ! docker build ${OS_BUILD_IMAGE_ARGS:-} -t "${tag}" "${directory}"; then
        return 1
    fi

    if [[ -n "${extra_tag}" ]]; then
        docker tag "${tag}" "${extra_tag}"
    fi
}
readonly -f os::build::image::internal::docker
14
hack/lib/build/release.sh
Normal file
@@ -0,0 +1,14 @@
#!/bin/bash

# This library holds utility functions for building releases.

# os::build::release::check_for_rpms checks that an RPM release has been built
function os::build::release::check_for_rpms() {
    if [[ ! -d "${OS_OUTPUT_RPMPATH}" || ! -d "${OS_OUTPUT_RPMPATH}/repodata" ]]; then
        local relative_release_path relative_bin_path
        relative_release_path="$( os::util::repository_relative_path "${OS_OUTPUT_RELEASEPATH}" )"
        relative_bin_path="$( os::util::repository_relative_path "${OS_OUTPUT_BINPATH}" )"
        os::log::fatal "No release RPMs have been built! RPMs are necessary to build container images.
Build them with:
 $ OS_BUILD_ENV_PRESERVE=${relative_bin_path}:${relative_release_path} hack/env make build-rpms"
    fi
}
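
# Example (illustrative): an image-build script would call this guard before
# invoking os::build::images, so that a missing RPM repository fails fast with
# the remediation command above instead of failing mid-build:
#
#   os::build::release::check_for_rpms
#   os::build::images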
95
hack/lib/build/rpm.sh
Normal file
@@ -0,0 +1,95 @@
#!/bin/bash

# This library holds utilities for building RPMs from Origin.

# os::build::rpm::get_nvra_vars determines the NEVRA of the RPMs
# that would be built from the current git state.
#
# Globals:
#  - OS_GIT_VERSION
# Arguments:
#  - None
# Exports:
#  - OS_RPM_VERSION
#  - OS_RPM_RELEASE
#  - OS_RPM_ARCHITECTURE
function os::build::rpm::get_nvra_vars() {
    # the package name can be overwritten but is normally 'origin'
    OS_RPM_ARCHITECTURE="$(uname -i)"

    # we can extract the package version from the build version
    os::build::version::get_vars
    if [[ "${OS_GIT_VERSION}" =~ ^v([0-9](\.[0-9]+)*)(.*) ]]; then
        OS_RPM_VERSION="${BASH_REMATCH[1]}"
        metadata="${BASH_REMATCH[3]}"
    else
        os::log::fatal "Malformed \$OS_GIT_VERSION: ${OS_GIT_VERSION}"
    fi

    # we can generate the package release from the git version metadata.
    # OS_GIT_VERSION will always have metadata, which either contains
    # pre-release information _and_ build metadata, or only the latter.
    # Build metadata may or may not contain the number of commits past
    # the last tag. If no commit number exists, we are on a tag and use 0.
    # ex.
    #   -alpha.0+shasums-123-dirty
    #   -alpha.0+shasums-123
    #   -alpha.0+shasums-dirty
    #   -alpha.0+shasums
    #   +shasums-123-dirty
    #   +shasums-123
    #   +shasums-dirty
    #   +shasums
    if [[ "${metadata:0:1}" == "+" ]]; then
        # we only have build metadata, but need to massage it so
        # we can generate a valid RPM release from it
        if [[ "${metadata}" =~ ^\+([a-z0-9]{7,40})(-([0-9]+))?(-dirty)?$ ]]; then
            build_sha="${BASH_REMATCH[1]}"
            build_num="${BASH_REMATCH[3]:-0}"
        else
            os::log::fatal "Malformed git version metadata: ${metadata}"
        fi
        OS_RPM_RELEASE="1.${build_num}.${build_sha}"
    elif [[ "${metadata:0:1}" == "-" ]]; then
        # we have both build metadata and pre-release info
        if [[ "${metadata}" =~ ^-([^\+]+)\+([a-z0-9]{7,40})(-([0-9]+))?(-dirty)?$ ]]; then
            pre_release="${BASH_REMATCH[1]}"
            build_sha="${BASH_REMATCH[2]}"
            build_num="${BASH_REMATCH[4]:-0}"
        else
            os::log::fatal "Malformed git version metadata: ${metadata}"
        fi
        OS_RPM_RELEASE="0.${pre_release}.${build_num}.${build_sha}"
    else
        os::log::fatal "Malformed git version metadata: ${metadata}"
    fi

    OS_RPM_GIT_VARS=$( os::build::version::save_vars | tr '\n' ' ' )

    export OS_RPM_VERSION OS_RPM_RELEASE OS_RPM_ARCHITECTURE OS_RPM_GIT_VARS
}
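
# Worked example (illustrative values): with OS_GIT_VERSION=v3.6.0-alpha.1+f00dfad-123,
# the first match yields OS_RPM_VERSION=3.6.0 and metadata=-alpha.1+f00dfad-123;
# the "-" branch then extracts pre_release=alpha.1, build_sha=f00dfad and
# build_num=123, producing:
#
#   OS_RPM_RELEASE=0.alpha.1.123.f00dfad
#
# so the RPM sorts as a pre-release (release "0.*") of version 3.6.0.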

# os::build::rpm::format_nvra formats the rpm NVRA vars generated by
# os::build::rpm::get_nvra_vars and will generate them if necessary
#
# Globals:
#  - OS_RPM_NAME
#  - OS_RPM_VERSION
#  - OS_RPM_RELEASE
#  - OS_RPM_ARCHITECTURE
# Arguments:
#  None
# Returns:
#  None
function os::build::rpm::format_nvra() {
    if [[ -z "${OS_RPM_VERSION:-}" || -z "${OS_RPM_RELEASE:-}" ]]; then
        os::build::rpm::get_nvra_vars
    fi
    if [[ -z "${OS_RPM_NAME-}" ]]; then
        OS_RPM_SPECFILE="$( find "${OS_ROOT}" -name '*.spec' )"
        OS_RPM_NAME="$( rpmspec -q --qf '%{name}\n' "${OS_RPM_SPECFILE}" | head -1 )"
    fi

    echo "${OS_RPM_NAME}-${OS_RPM_VERSION}-${OS_RPM_RELEASE}.${OS_RPM_ARCHITECTURE}"
}
82
hack/lib/build/version.sh
Normal file
@@ -0,0 +1,82 @@
#!/bin/bash

# This library holds utility functions for determining
# product versions from Git repository state.

# os::build::version::get_vars loads the standard version variables as
# ENV vars
function os::build::version::get_vars() {
    if [[ -n "${OS_VERSION_FILE-}" ]]; then
        if [[ -f "${OS_VERSION_FILE}" ]]; then
            source "${OS_VERSION_FILE}"
            return
        fi
        if [[ ! -d "${OS_ROOT}/.git" ]]; then
            os::log::fatal "No version file at ${OS_VERSION_FILE}"
        fi
        os::log::warning "No version file at ${OS_VERSION_FILE}, falling back to git versions"
    fi
    os::build::version::git_vars
}
readonly -f os::build::version::get_vars

# os::build::version::git_vars looks up the current Git vars if they have not been calculated.
function os::build::version::git_vars() {
    if [[ -n "${OS_GIT_VERSION-}" ]]; then
        return 0
    fi

    local git=(git --work-tree "${OS_ROOT}")

    if [[ -n ${OS_GIT_COMMIT-} ]] || OS_GIT_COMMIT=$("${git[@]}" rev-parse --short "HEAD^{commit}" 2>/dev/null); then
        if [[ -z ${OS_GIT_TREE_STATE-} ]]; then
            # Check if the tree is dirty; default to dirty.
            if git_status=$("${git[@]}" status --porcelain 2>/dev/null) && [[ -z ${git_status} ]]; then
                OS_GIT_TREE_STATE="clean"
            else
                OS_GIT_TREE_STATE="dirty"
            fi
        fi
        # Use git describe to find the version based on annotated tags.
        if [[ -n ${OS_GIT_VERSION-} ]] || OS_GIT_VERSION=$("${git[@]}" describe --long --tags --abbrev=7 --match 'v[0-9]*' "${OS_GIT_COMMIT}^{commit}" 2>/dev/null); then
            # Try to match the "git describe" output to a regex to try to extract
            # the "major" and "minor" versions and whether this is the exact tagged
            # version or whether the tree is between two tagged versions.
            if [[ "${OS_GIT_VERSION}" =~ ^v([0-9]+)\.([0-9]+)\.([0-9]+)(\.[0-9]+)*([-].*)?$ ]]; then
                OS_GIT_MAJOR=${BASH_REMATCH[1]}
                OS_GIT_MINOR=${BASH_REMATCH[2]}
                OS_GIT_PATCH=${BASH_REMATCH[3]}
                if [[ -n "${BASH_REMATCH[5]}" ]]; then
                    OS_GIT_MINOR+="+"
                fi
            fi

            # This translates the "git describe" output to an actual semver.org
            # compatible semantic version that looks something like this:
            #   v1.1.0-alpha.0+84c76d1-6
            OS_GIT_VERSION=$(echo "${OS_GIT_VERSION}" | sed "s/-\([0-9]\{1,\}\)-g\([0-9a-f]\{7,40\}\)$/\+\2-\1/")
            # If this is an exact tag, remove the last segment.
            OS_GIT_VERSION=$(echo "${OS_GIT_VERSION}" | sed "s/-0$//")
            if [[ "${OS_GIT_TREE_STATE}" == "dirty" ]]; then
                # git describe --dirty only considers changes to existing files, but
                # that is problematic since new untracked .go files affect the build,
                # so use our idea of "dirty" from git status instead.
                OS_GIT_VERSION+="-dirty"
            fi
        fi
    fi
}
readonly -f os::build::version::git_vars
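
# Worked example (illustrative values): six commits past the v1.1.0-alpha.0 tag,
# "git describe --long" prints v1.1.0-alpha.0-6-g84c76d1; the first sed rewrites
# the "-6-g84c76d1" suffix to "+84c76d1-6", giving
# OS_GIT_VERSION=v1.1.0-alpha.0+84c76d1-6. Exactly on the tag, describe prints
# v1.1.0-alpha.0-0-g84c76d1, which becomes v1.1.0-alpha.0+84c76d1 once the
# trailing "-0" segment is stripped. A dirty work tree appends "-dirty" to
# either form.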

# os::build::version::save_vars prints the version variables in a form that
# can be saved to a version file and `source`d back in later.
function os::build::version::save_vars() {
    cat <<EOF
OS_GIT_COMMIT='${OS_GIT_COMMIT-}'
OS_GIT_TREE_STATE='${OS_GIT_TREE_STATE-}'
OS_GIT_VERSION='${OS_GIT_VERSION-}'
OS_GIT_MAJOR='${OS_GIT_MAJOR-}'
OS_GIT_MINOR='${OS_GIT_MINOR-}'
OS_GIT_PATCH='${OS_GIT_PATCH-}'
EOF
}
readonly -f os::build::version::save_vars
191
hack/lib/cleanup.sh
Normal file
@@ -0,0 +1,191 @@
#!/bin/bash

# This library holds functions that are used to clean up local
# system state after other scripts have run.

# os::cleanup::all will clean up all of the processes and data that
# a script leaves around after running. All of the sub-tasks called
# from this function should gracefully handle when they do not need
# to do anything.
#
# Globals:
#  - ARTIFACT_DIR
#  - SKIP_CLEANUP
#  - SKIP_TEARDOWN
# Arguments:
#  None
# Returns:
#  None
function os::cleanup::all() {
    if [[ -n "${SKIP_CLEANUP:-}" ]]; then
        os::log::warning "[CLEANUP] Skipping cleanup routines..."
        return 0
    fi

    # All of our cleanup is best-effort, so we do not care
    # if any specific step fails.
    set +o errexit

    os::log::info "[CLEANUP] Beginning cleanup routines..."
    os::cleanup::dump_container_logs
    os::cleanup::truncate_large_logs

    if [[ -z "${SKIP_TEARDOWN:-}" ]]; then
        os::cleanup::containers
        os::cleanup::processes
    fi
}
readonly -f os::cleanup::all
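
# Example (illustrative): a test script would typically register this as an
# EXIT handler right after sourcing hack/lib/init.sh, so that cleanup runs
# whether the script succeeds or fails:
#
#   trap "os::cleanup::all" EXIT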

# os::cleanup::containers stops our containers and then optionally removes
# them along with any volumes they had attached.
#
# Globals:
#  - SKIP_IMAGE_CLEANUP
# Arguments:
#  None
# Returns:
#  None
function os::cleanup::containers() {
    if ! os::util::find::system_binary docker >/dev/null 2>&1; then
        os::log::warning "[CLEANUP] No \`docker\` binary found, skipping container cleanup."
        return
    fi

    os::log::info "[CLEANUP] Stopping docker containers"
    for id in $( os::cleanup::internal::list_our_containers ); do
        os::log::debug "Stopping ${id}"
        docker stop "${id}" >/dev/null
    done

    if [[ -n "${SKIP_IMAGE_CLEANUP:-}" ]]; then
        return
    fi

    os::log::info "[CLEANUP] Removing docker containers"
    for id in $( os::cleanup::internal::list_our_containers ); do
        os::log::debug "Removing ${id}"
        docker rm --volumes "${id}" >/dev/null
    done
}
readonly -f os::cleanup::containers

# os::cleanup::dump_container_logs operates on Kubernetes containers to dump
# any logs from the containers.
#
# Globals:
#  None
# Arguments:
#  None
# Returns:
#  None
function os::cleanup::dump_container_logs() {
    if ! os::util::find::system_binary docker >/dev/null 2>&1; then
        os::log::warning "[CLEANUP] No \`docker\` binary found, skipping container log harvest."
        return
    fi

    local container_log_dir="${LOG_DIR}/containers"
    mkdir -p "${container_log_dir}"

    os::log::info "[CLEANUP] Dumping container logs to $( os::util::repository_relative_path "${container_log_dir}" )"
    for id in $( os::cleanup::internal::list_our_containers ); do
        local name; name="$( docker inspect --format '{{ .Name }}' "${id}" )"
        os::log::debug "Dumping logs for ${id} to ${name}.log"
        docker logs "${id}" >"${container_log_dir}/${name}.log" 2>&1
    done
}
readonly -f os::cleanup::dump_container_logs

# os::cleanup::internal::list_containers returns a space-delimited list of
# docker containers that match a name regex.
#
# Globals:
#  None
# Arguments:
#  1 - regex to match on the name
# Returns:
#  None
function os::cleanup::internal::list_containers() {
    local regex="$1"
    local ids
    for short_id in $( docker ps -aq ); do
        local id; id="$( docker inspect --format '{{ .Id }}' "${short_id}" )"
        local name; name="$( docker inspect --format '{{ .Name }}' "${id}" )"
        if [[ "${name}" =~ ${regex} ]]; then
            ids+=( "${id}" )
        fi
    done

    echo "${ids[*]:+"${ids[*]}"}"
}
readonly -f os::cleanup::internal::list_containers
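
# Example (illustrative): the list_our_containers helper used by the callers
# above is expected to build on this function by passing a name pattern for
# the containers our scripts start, along the lines of:
#
#   os::cleanup::internal::list_containers "^/k8s_.*"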

# os::cleanup::tmpdir performs cleanup of temp directories as a precondition
# for running a test. It tries to clean up mounts in the temp directories.
#
# Globals:
#  - BASETMPDIR
#  - USE_SUDO
# Returns:
#  None
function os::cleanup::tmpdir() {
    os::log::info "[CLEANUP] Cleaning up temporary directories"
    # ensure that the directories are clean
    if os::util::find::system_binary "findmnt" &>/dev/null; then
        for target in $( ${USE_SUDO:+sudo} findmnt --output TARGET --list ); do
            if [[ "${target}" == "${BASETMPDIR}"* ]]; then
                ${USE_SUDO:+sudo} umount "${target}"
            fi
        done
    fi

    # delete any subdirectory underneath BASETMPDIR
    for directory in $( find "${BASETMPDIR}" -mindepth 2 -maxdepth 2 ); do
        ${USE_SUDO:+sudo} rm -rf "${directory}"
    done
}
readonly -f os::cleanup::tmpdir

# os::cleanup::truncate_large_logs truncates very large files under
# $LOG_DIR and $ARTIFACT_DIR so we do not upload them to cloud storage
# after CI runs.
#
# Globals:
#  - LOG_DIR
#  - ARTIFACT_DIR
# Arguments:
#  None
# Returns:
#  None
function os::cleanup::truncate_large_logs() {
    local max_file_size="200M"
    os::log::info "[CLEANUP] Truncating log files over ${max_file_size}"
    for file in $(find "${ARTIFACT_DIR}" "${LOG_DIR}" -type f -name '*.log' \( -size +${max_file_size} \)); do
        mv "${file}" "${file}.tmp"
        echo "LOGFILE TOO LONG ($(du -h "${file}.tmp")), PREVIOUS BYTES TRUNCATED. LAST ${max_file_size} OF LOGFILE:" > "${file}"
        tail -c ${max_file_size} "${file}.tmp" >> "${file}"
        rm "${file}.tmp"
    done
}
readonly -f os::cleanup::truncate_large_logs

# os::cleanup::processes kills all processes created by the test
# script.
#
# Globals:
#  None
# Arguments:
#  None
# Returns:
#  None
function os::cleanup::processes() {
    os::log::info "[CLEANUP] Killing child processes"
    for job in $( jobs -pr ); do
        for child in $( pgrep -P "${job}" ); do
            ${USE_SUDO:+sudo} kill "${child}" &> /dev/null
        done
        ${USE_SUDO:+sudo} kill "${job}" &> /dev/null
    done
}
readonly -f os::cleanup::processes
619
hack/lib/cmd.sh
Normal file
@@ -0,0 +1,619 @@
#!/bin/bash

# This utility file contains functions that wrap commands to be tested. All wrapper functions run commands
# in a sub-shell and redirect all output. Tests in test-cmd *must* use these functions for testing.

# expect_success runs the cmd and expects an exit code of 0
function os::cmd::expect_success() {
    if [[ $# -ne 1 ]]; then echo "os::cmd::expect_success expects only one argument, got $#"; return 1; fi
    local cmd=$1

    os::cmd::internal::expect_exit_code_run_grep "${cmd}"
}
readonly -f os::cmd::expect_success

# expect_failure runs the cmd and expects a non-zero exit code
function os::cmd::expect_failure() {
    if [[ $# -ne 1 ]]; then echo "os::cmd::expect_failure expects only one argument, got $#"; return 1; fi
    local cmd=$1

    os::cmd::internal::expect_exit_code_run_grep "${cmd}" "os::cmd::internal::failure_func"
}
readonly -f os::cmd::expect_failure

# expect_success_and_text runs the cmd and expects an exit code of 0
# as well as running a grep test to find the given string in the output
function os::cmd::expect_success_and_text() {
    if [[ $# -ne 2 ]]; then echo "os::cmd::expect_success_and_text expects two arguments, got $#"; return 1; fi
    local cmd=$1
    local expected_text=$2

    os::cmd::internal::expect_exit_code_run_grep "${cmd}" "os::cmd::internal::success_func" "${expected_text}"
}
readonly -f os::cmd::expect_success_and_text

# expect_failure_and_text runs the cmd and expects a non-zero exit code
# as well as running a grep test to find the given string in the output
function os::cmd::expect_failure_and_text() {
    if [[ $# -ne 2 ]]; then echo "os::cmd::expect_failure_and_text expects two arguments, got $#"; return 1; fi
    local cmd=$1
    local expected_text=$2

    os::cmd::internal::expect_exit_code_run_grep "${cmd}" "os::cmd::internal::failure_func" "${expected_text}"
}
readonly -f os::cmd::expect_failure_and_text

# expect_success_and_not_text runs the cmd and expects an exit code of 0
# as well as running a grep test to ensure the given string is not in the output
function os::cmd::expect_success_and_not_text() {
    if [[ $# -ne 2 ]]; then echo "os::cmd::expect_success_and_not_text expects two arguments, got $#"; return 1; fi
    local cmd=$1
    local expected_text=$2

    os::cmd::internal::expect_exit_code_run_grep "${cmd}" "os::cmd::internal::success_func" "${expected_text}" "os::cmd::internal::failure_func"
}
readonly -f os::cmd::expect_success_and_not_text

# expect_failure_and_not_text runs the cmd and expects a non-zero exit code
# as well as running a grep test to ensure the given string is not in the output
function os::cmd::expect_failure_and_not_text() {
    if [[ $# -ne 2 ]]; then echo "os::cmd::expect_failure_and_not_text expects two arguments, got $#"; return 1; fi
    local cmd=$1
    local expected_text=$2

    os::cmd::internal::expect_exit_code_run_grep "${cmd}" "os::cmd::internal::failure_func" "${expected_text}" "os::cmd::internal::failure_func"
}
readonly -f os::cmd::expect_failure_and_not_text

# expect_code runs the cmd and expects a given exit code
function os::cmd::expect_code() {
    if [[ $# -ne 2 ]]; then echo "os::cmd::expect_code expects two arguments, got $#"; return 1; fi
    local cmd=$1
    local expected_cmd_code=$2

    os::cmd::internal::expect_exit_code_run_grep "${cmd}" "os::cmd::internal::specific_code_func ${expected_cmd_code}"
}
readonly -f os::cmd::expect_code

# expect_code_and_text runs the cmd and expects the given exit code
# as well as running a grep test to find the given string in the output
function os::cmd::expect_code_and_text() {
    if [[ $# -ne 3 ]]; then echo "os::cmd::expect_code_and_text expects three arguments, got $#"; return 1; fi
    local cmd=$1
    local expected_cmd_code=$2
    local expected_text=$3

    os::cmd::internal::expect_exit_code_run_grep "${cmd}" "os::cmd::internal::specific_code_func ${expected_cmd_code}" "${expected_text}"
}
readonly -f os::cmd::expect_code_and_text

# expect_code_and_not_text runs the cmd and expects the given exit code
# as well as running a grep test to ensure the given string is not in the output
function os::cmd::expect_code_and_not_text() {
    if [[ $# -ne 3 ]]; then echo "os::cmd::expect_code_and_not_text expects three arguments, got $#"; return 1; fi
    local cmd=$1
    local expected_cmd_code=$2
    local expected_text=$3

    os::cmd::internal::expect_exit_code_run_grep "${cmd}" "os::cmd::internal::specific_code_func ${expected_cmd_code}" "${expected_text}" "os::cmd::internal::failure_func"
}
readonly -f os::cmd::expect_code_and_not_text
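
# Example (illustrative; any CLI invocation works here -- each test is a
# single string that the wrapper evaluates in a sub-shell):
#
#   os::cmd::expect_success 'oc get pods'
#   os::cmd::expect_failure_and_text 'oc get pods/missing' 'not found'
#
# The second assertion passes only if the command exits non-zero *and* its
# combined output matches the extended regex 'not found'.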

millisecond=1
second=$(( 1000 * millisecond ))
minute=$(( 60 * second ))

# os::cmd::try_until_success runs the cmd at a small interval until either the command succeeds or times out.
# The default time-out for os::cmd::try_until_success is 60 seconds.
# The default interval for os::cmd::try_until_success is 200ms.
function os::cmd::try_until_success() {
    if [[ $# -lt 1 ]]; then echo "os::cmd::try_until_success expects at least one argument, got $#"; return 1; fi
    local cmd=$1
    local duration=${2:-$minute}
    local interval=${3:-0.2}

    os::cmd::internal::run_until_exit_code "${cmd}" "os::cmd::internal::success_func" "${duration}" "${interval}"
}
readonly -f os::cmd::try_until_success

# os::cmd::try_until_failure runs the cmd until either the command fails or times out.
# The default time-out for os::cmd::try_until_failure is 60 seconds.
function os::cmd::try_until_failure() {
    if [[ $# -lt 1 ]]; then echo "os::cmd::try_until_failure expects at least one argument, got $#"; return 1; fi
    local cmd=$1
    local duration=${2:-$minute}
    local interval=${3:-0.2}

    os::cmd::internal::run_until_exit_code "${cmd}" "os::cmd::internal::failure_func" "${duration}" "${interval}"
}
readonly -f os::cmd::try_until_failure

# os::cmd::try_until_text runs the cmd until either the command outputs the desired text or times out.
# The default time-out for os::cmd::try_until_text is 60 seconds.
function os::cmd::try_until_text() {
    if [[ $# -lt 2 ]]; then echo "os::cmd::try_until_text expects at least two arguments, got $#"; return 1; fi
    local cmd=$1
    local text=$2
    local duration=${3:-$minute}
    local interval=${4:-0.2}

    os::cmd::internal::run_until_text "${cmd}" "${text}" "os::cmd::internal::success_func" "${duration}" "${interval}"
}
readonly -f os::cmd::try_until_text

# os::cmd::try_until_not_text runs the cmd until either the command does not output the text or times out.
# The default time-out for os::cmd::try_until_not_text is 60 seconds.
function os::cmd::try_until_not_text() {
    if [[ $# -lt 2 ]]; then echo "os::cmd::try_until_not_text expects at least two arguments, got $#"; return 1; fi
    local cmd=$1
    local text=$2
    local duration=${3:-$minute}
    local interval=${4:-0.2}

    os::cmd::internal::run_until_text "${cmd}" "${text}" "os::cmd::internal::failure_func" "${duration}" "${interval}"
}
readonly -f os::cmd::try_until_not_text
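
# Example (illustrative): wait up to two minutes, polling every 200ms, for a
# deployment to report readiness. Durations are in milliseconds, expressed
# with the time constants defined above:
#
#   os::cmd::try_until_text 'oc get dc/docker-registry -o yaml' 'readyReplicas: 1' $(( 2 * minute ))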

# Functions in the os::cmd::internal namespace are discouraged from being used outside of os::cmd

# In order to harvest stderr and stdout at the same time into different buckets, we need to stick them into files
# in an intermediate step
os_cmd_internal_tmpdir="${TMPDIR:-"/tmp"}/cmd"
os_cmd_internal_tmpout="${os_cmd_internal_tmpdir}/tmp_stdout.log"
os_cmd_internal_tmperr="${os_cmd_internal_tmpdir}/tmp_stderr.log"

# os::cmd::internal::expect_exit_code_run_grep runs the provided test command and expects a specific
# exit code from that command as well as the success of a specified `grep` invocation. Output from the
# command to be tested is suppressed unless either `VERBOSE=1` or the test fails. This function bypasses
# any error exiting settings or traps set by upstream callers by masking the return code of the command
# with the return code of setting the result variable on failure.
#
# Globals:
#  - JUNIT_REPORT_OUTPUT
#  - VERBOSE
# Arguments:
#  - 1: the command to run
#  - 2: command evaluation assertion to use
#  - 3: text to test for
#  - 4: text assertion to use
# Returns:
#  - 0: if all assertions met
#  - 1: if any assertions fail
function os::cmd::internal::expect_exit_code_run_grep() {
    local cmd=$1
    # default expected cmd code to 0 for success
    local cmd_eval_func=${2:-os::cmd::internal::success_func}
    # default to nothing
    local grep_args=${3:-}
    # default expected test code to 0 for success
    local test_eval_func=${4:-os::cmd::internal::success_func}

    local -a junit_log

    os::cmd::internal::init_tempdir
    os::test::junit::declare_test_start

    local name=$(os::cmd::internal::describe_call "${cmd}" "${cmd_eval_func}" "${grep_args}" "${test_eval_func}")
    local preamble="Running ${name}..."
    echo "${preamble}"
    # for ease of parsing, we want the entire declaration on one line, so we replace '\n' with ';'
    junit_log+=( "${name//$'\n'/;}" )

    local start_time=$(os::cmd::internal::seconds_since_epoch)

    local cmd_result=$( os::cmd::internal::run_collecting_output "${cmd}"; echo $? )
    local cmd_succeeded=$( ${cmd_eval_func} "${cmd_result}"; echo $? )

    local test_result=0
    if [[ -n "${grep_args}" ]]; then
        test_result=$( os::cmd::internal::run_collecting_output 'grep -Eq "${grep_args}" <(os::cmd::internal::get_results)'; echo $? )
    fi
    local test_succeeded=$( ${test_eval_func} "${test_result}"; echo $? )

    local end_time=$(os::cmd::internal::seconds_since_epoch)
    local time_elapsed=$(echo "scale=3; ${end_time} - ${start_time}" | bc | xargs printf '%5.3f') # in decimal seconds, we need leading zeroes for parsing later

    # clear the preamble so we can print out the success or error message
    os::text::clear_string "${preamble}"

    local return_code
    if (( cmd_succeeded && test_succeeded )); then
        os::text::print_green "SUCCESS after ${time_elapsed}s: ${name}"
        junit_log+=( "SUCCESS after ${time_elapsed}s: ${name//$'\n'/;}" )

        if [[ -n ${VERBOSE-} ]]; then
            os::cmd::internal::print_results
        fi
        return_code=0
    else
        local cause=$(os::cmd::internal::assemble_causes "${cmd_succeeded}" "${test_succeeded}")

        os::text::print_red_bold "FAILURE after ${time_elapsed}s: ${name}: ${cause}"
        junit_log+=( "FAILURE after ${time_elapsed}s: ${name//$'\n'/;}: ${cause}" )

        os::text::print_red "$(os::cmd::internal::print_results)"
        return_code=1
    fi

    junit_log+=( "$(os::cmd::internal::print_results)" )
    # append inside of a subshell so that IFS doesn't get propagated out
    ( IFS=$'\n'; echo "${junit_log[*]}" >> "${JUNIT_REPORT_OUTPUT:-/dev/null}" )
    os::test::junit::declare_test_end
    return "${return_code}"
}
readonly -f os::cmd::internal::expect_exit_code_run_grep

# os::cmd::internal::init_tempdir initializes the temporary directory
function os::cmd::internal::init_tempdir() {
    mkdir -p "${os_cmd_internal_tmpdir}"
    rm -f "${os_cmd_internal_tmpdir}"/tmp_std{out,err}.log
}
readonly -f os::cmd::internal::init_tempdir

# os::cmd::internal::describe_call determines the file:line of the latest function call made
# from outside of this file in the call stack, and the name of the function being called from
# that line, returning a string describing the call
function os::cmd::internal::describe_call() {
    local cmd=$1
    local cmd_eval_func=$2
    local grep_args=${3:-}
    local test_eval_func=${4:-}

    local caller_id=$(os::cmd::internal::determine_caller)
    local full_name="${caller_id}: executing '${cmd}'"

    local cmd_expectation=$(os::cmd::internal::describe_expectation "${cmd_eval_func}")
    local full_name="${full_name} expecting ${cmd_expectation}"

    if [[ -n "${grep_args}" ]]; then
        local text_expecting=
        case "${test_eval_func}" in
        "os::cmd::internal::success_func")
            text_expecting="text" ;;
        "os::cmd::internal::failure_func")
            text_expecting="not text" ;;
        esac
        full_name="${full_name} and ${text_expecting} '${grep_args}'"
    fi

    echo "${full_name}"
}
readonly -f os::cmd::internal::describe_call

# os::cmd::internal::determine_caller determines the file relative to the OpenShift Origin root directory
# and line number of the function call to the outer os::cmd wrapper function
function os::cmd::internal::determine_caller() {
    local call_depth=
    local len_sources="${#BASH_SOURCE[@]}"
    for (( i=0; i<${len_sources}; i++ )); do
        if ! echo "${BASH_SOURCE[i]}" | grep -q "hack/lib/cmd\.sh$"; then
            call_depth=$i
            break
        fi
    done

    local caller_file="${BASH_SOURCE[${call_depth}]}"
    caller_file="$( os::util::repository_relative_path "${caller_file}" )"
    local caller_line="${BASH_LINENO[${call_depth}-1]}"
    echo "${caller_file}:${caller_line}"
}
readonly -f os::cmd::internal::determine_caller

# os::cmd::internal::describe_expectation describes a command return code evaluation function
function os::cmd::internal::describe_expectation() {
    local func=$1
    case "${func}" in
    "os::cmd::internal::success_func")
        echo "success" ;;
    "os::cmd::internal::failure_func")
        echo "failure" ;;
    "os::cmd::internal::specific_code_func"*[0-9])
        local code=$(echo "${func}" | grep -Eo "[0-9]+$")
        echo "exit code ${code}" ;;
    "")
        echo "any result"
    esac
}
readonly -f os::cmd::internal::describe_expectation

# os::cmd::internal::seconds_since_epoch returns the number of seconds elapsed since the epoch
# with milli-second precision
function os::cmd::internal::seconds_since_epoch() {
    local ns=$(date +%s%N)
    # if `date` doesn't support nanoseconds, return second precision
    if [[ "$ns" == *N ]]; then
        date "+%s.000"
        return
    fi
    echo $(bc <<< "scale=3; ${ns}/1000000000")
}
readonly -f os::cmd::internal::seconds_since_epoch

# os::cmd::internal::run_collecting_output runs the command given, piping stdout and stderr into
# the given files, and returning the exit code of the command
function os::cmd::internal::run_collecting_output() {
    local cmd=$1

    local result=
    $( eval "${cmd}" 1>>"${os_cmd_internal_tmpout}" 2>>"${os_cmd_internal_tmperr}" ) || result=$?
    local result=${result:-0} # if we haven't set result yet, the command succeeded

    return "${result}"
}
readonly -f os::cmd::internal::run_collecting_output

# os::cmd::internal::success_func determines if the input exit code denotes success
# this function returns 0 for false and 1 for true to be compatible with arithmetic tests
function os::cmd::internal::success_func() {
    local exit_code=$1

    # use a negated test to get output correct for (( ))
    [[ "${exit_code}" -ne "0" ]]
    return $?
}
readonly -f os::cmd::internal::success_func

# os::cmd::internal::failure_func determines if the input exit code denotes failure
# this function returns 0 for false and 1 for true to be compatible with arithmetic tests
function os::cmd::internal::failure_func() {
    local exit_code=$1

    # use a negated test to get output correct for (( ))
    [[ "${exit_code}" -eq "0" ]]
    return $?
}
readonly -f os::cmd::internal::failure_func

# os::cmd::internal::specific_code_func determines if the input exit code matches the given code
# this function returns 0 for false and 1 for true to be compatible with arithmetic tests
function os::cmd::internal::specific_code_func() {
    local expected_code=$1
    local exit_code=$2

    # use a negated test to get output correct for (( ))
    [[ "${exit_code}" -ne "${expected_code}" ]]
    return $?
}
readonly -f os::cmd::internal::specific_code_func

# os::cmd::internal::get_results prints the stderr and stdout files
function os::cmd::internal::get_results() {
    cat "${os_cmd_internal_tmpout}" "${os_cmd_internal_tmperr}"
}
readonly -f os::cmd::internal::get_results

# os::cmd::internal::get_last_results prints the stderr and stdout from the last attempt
function os::cmd::internal::get_last_results() {
    awk 'BEGIN { RS = "\x1e" } END { print $0 }' "${os_cmd_internal_tmpout}"
    awk 'BEGIN { RS = "\x1e" } END { print $0 }' "${os_cmd_internal_tmperr}"
}
readonly -f os::cmd::internal::get_last_results

# os::cmd::internal::mark_attempt marks the end of an attempt in the stdout and stderr log files
# this is used to make the try_until_* output more concise
function os::cmd::internal::mark_attempt() {
    echo -e '\x1e' >> "${os_cmd_internal_tmpout}"
    echo -e '\x1e' >> "${os_cmd_internal_tmperr}"
}
readonly -f os::cmd::internal::mark_attempt

# os::cmd::internal::compress_output compresses an output file into timeline representation
function os::cmd::internal::compress_output() {
    local logfile=$1

    awk -f "${OS_ROOT}/hack/lib/compress.awk" "${logfile}"
}
readonly -f os::cmd::internal::compress_output

# os::cmd::internal::print_results pretty-prints the stderr and stdout files. If attempt separators
# are present, this function returns a concise view of the stdout and stderr output files using a
# timeline format, where consecutive output lines that are the same are condensed into one line
# with a counter
function os::cmd::internal::print_results() {
    if [[ -s "${os_cmd_internal_tmpout}" ]]; then
        echo "Standard output from the command:"
        if grep -q $'\x1e' "${os_cmd_internal_tmpout}"; then
            os::cmd::internal::compress_output "${os_cmd_internal_tmpout}"
        else
            cat "${os_cmd_internal_tmpout}"; echo
        fi
    else
        echo "There was no output from the command."
    fi

    if [[ -s "${os_cmd_internal_tmperr}" ]]; then
        echo "Standard error from the command:"
        if grep -q $'\x1e' "${os_cmd_internal_tmperr}"; then
            os::cmd::internal::compress_output "${os_cmd_internal_tmperr}"
        else
            cat "${os_cmd_internal_tmperr}"; echo
        fi
    else
        echo "There was no error output from the command."
    fi
}
readonly -f os::cmd::internal::print_results

# os::cmd::internal::assemble_causes determines from the two input booleans which part of the test
# failed and generates a nice delimited list of failure causes
function os::cmd::internal::assemble_causes() {
    local cmd_succeeded=$1
    local test_succeeded=$2

    local causes=()
    if (( ! cmd_succeeded )); then
        causes+=("the command returned the wrong error code")
    fi
    if (( ! test_succeeded )); then
        causes+=("the output content test failed")
    fi

    local list=$(printf '; %s' "${causes[@]}")
    echo "${list:2}"
}
readonly -f os::cmd::internal::assemble_causes

# os::cmd::internal::run_until_exit_code runs the provided command until the exit code test given
# succeeds or the timeout given runs out. Output from the command to be tested is suppressed unless
# either `VERBOSE=1` or the test fails. This function bypasses any error exiting settings or traps
# set by upstream callers by masking the return code of the command with the return code of setting
# the result variable on failure.
#
# Globals:
#  - JUNIT_REPORT_OUTPUT
#  - VERBOSE
# Arguments:
#  - 1: the command to run
#  - 2: command evaluation assertion to use
#  - 3: timeout duration
#  - 4: interval duration
# Returns:
#  - 0: if all assertions met before timeout
#  - 1: if timeout occurs
function os::cmd::internal::run_until_exit_code() {
    local cmd=$1
    local cmd_eval_func=$2
    local duration=$3
    local interval=$4

    local -a junit_log

    os::cmd::internal::init_tempdir
    os::test::junit::declare_test_start

    local description=$(os::cmd::internal::describe_call "${cmd}" "${cmd_eval_func}")
    local duration_seconds=$(echo "scale=3; $(( duration )) / 1000" | bc | xargs printf '%5.3f')
    local description="${description}; re-trying every ${interval}s until completion or ${duration_seconds}s"
    local preamble="Running ${description}..."
    echo "${preamble}"
    # for ease of parsing, we want the entire declaration on one line, so we replace '\n' with ';'
    junit_log+=( "${description//$'\n'/;}" )

    local start_time=$(os::cmd::internal::seconds_since_epoch)

    local deadline=$(( $(date +%s000) + $duration ))
    local cmd_succeeded=0
    while [ $(date +%s000) -lt $deadline ]; do
        local cmd_result=$( os::cmd::internal::run_collecting_output "${cmd}"; echo $? )
        cmd_succeeded=$( ${cmd_eval_func} "${cmd_result}"; echo $? )
        if (( cmd_succeeded )); then
            break
        fi
        sleep "${interval}"
        os::cmd::internal::mark_attempt
    done

    local end_time=$(os::cmd::internal::seconds_since_epoch)
    local time_elapsed=$(echo "scale=9; ${end_time} - ${start_time}" | bc | xargs printf '%5.3f') # in decimal seconds, we need leading zeroes for parsing later

    # clear the preamble so we can print out the success or error message
    os::text::clear_string "${preamble}"

    local return_code
    if (( cmd_succeeded )); then
        os::text::print_green "SUCCESS after ${time_elapsed}s: ${description}"
        junit_log+=( "SUCCESS after ${time_elapsed}s: ${description//$'\n'/;}" )

        if [[ -n ${VERBOSE-} ]]; then
            os::cmd::internal::print_results
        fi
        return_code=0
    else
        os::text::print_red_bold "FAILURE after ${time_elapsed}s: ${description}: the command timed out"
        junit_log+=( "FAILURE after ${time_elapsed}s: ${description//$'\n'/;}: the command timed out" )

        os::text::print_red "$(os::cmd::internal::print_results)"
        return_code=1
    fi

    junit_log+=( "$(os::cmd::internal::print_results)" )
    ( IFS=$'\n'; echo "${junit_log[*]}" >> "${JUNIT_REPORT_OUTPUT:-/dev/null}" )
    os::test::junit::declare_test_end
    return "${return_code}"
}
readonly -f os::cmd::internal::run_until_exit_code

# os::cmd::internal::run_until_text runs the provided command until the assertion function succeeds with
# the given text on the command output or the timeout given runs out. This can be used to run until the
# output does or does not contain some text. Output from the command to be tested is suppressed unless
# either `VERBOSE=1` or the test fails. This function bypasses any error exiting settings or traps
# set by upstream callers by masking the return code of the command with the return code of setting
# the result variable on failure.
#
# Globals:
#  - JUNIT_REPORT_OUTPUT
#  - VERBOSE
# Arguments:
#  - 1: the command to run
#  - 2: text to test for
#  - 3: text assertion to use
#  - 4: timeout duration
#  - 5: interval duration
# Returns:
#  - 0: if all assertions met before timeout
#  - 1: if timeout occurs
function os::cmd::internal::run_until_text() {
    local cmd=$1
    local text=$2
    local test_eval_func=${3:-os::cmd::internal::success_func}
    local duration=$4
    local interval=$5

    local -a junit_log

    os::cmd::internal::init_tempdir
    os::test::junit::declare_test_start

    local description=$(os::cmd::internal::describe_call "${cmd}" "" "${text}" "${test_eval_func}")
    local duration_seconds=$(echo "scale=3; $(( duration )) / 1000" | bc | xargs printf '%5.3f')
    local description="${description}; re-trying every ${interval}s until completion or ${duration_seconds}s"
    local preamble="Running ${description}..."
    echo "${preamble}"
    # for ease of parsing, we want the entire declaration on one line, so we replace '\n' with ';'
    junit_log+=( "${description//$'\n'/;}" )

    local start_time=$(os::cmd::internal::seconds_since_epoch)

    local deadline=$(( $(date +%s000) + $duration ))
    local test_succeeded=0
    while [ $(date +%s000) -lt $deadline ]; do
        local cmd_result=$( os::cmd::internal::run_collecting_output "${cmd}"; echo $? )
        local test_result
        test_result=$( os::cmd::internal::run_collecting_output 'grep -Eq "${text}" <(os::cmd::internal::get_last_results)'; echo $? )
        test_succeeded=$( ${test_eval_func} "${test_result}"; echo $? )

        if (( test_succeeded )); then
            break
        fi
        sleep "${interval}"
        os::cmd::internal::mark_attempt
    done

    local end_time=$(os::cmd::internal::seconds_since_epoch)
    local time_elapsed=$(echo "scale=9; ${end_time} - ${start_time}" | bc | xargs printf '%5.3f') # in decimal seconds, we need leading zeroes for parsing later

    # clear the preamble so we can print out the success or error message
    os::text::clear_string "${preamble}"

    local return_code
    if (( test_succeeded )); then
        os::text::print_green "SUCCESS after ${time_elapsed}s: ${description}"
        junit_log+=( "SUCCESS after ${time_elapsed}s: ${description//$'\n'/;}" )

        if [[ -n ${VERBOSE-} ]]; then
            os::cmd::internal::print_results
        fi
        return_code=0
    else
        os::text::print_red_bold "FAILURE after ${time_elapsed}s: ${description}: the command timed out"
        junit_log+=( "FAILURE after ${time_elapsed}s: ${description//$'\n'/;}: the command timed out" )

        os::text::print_red "$(os::cmd::internal::print_results)"
        return_code=1
    fi

    junit_log+=( "$(os::cmd::internal::print_results)" )
    ( IFS=$'\n'; echo "${junit_log[*]}" >> "${JUNIT_REPORT_OUTPUT:-/dev/null}" )
    os::test::junit::declare_test_end
    return "${return_code}"
}
readonly -f os::cmd::internal::run_until_text
41
hack/lib/compress.awk
Normal file
@@ -0,0 +1,41 @@
# Helper functions
function trim(s) {
    gsub(/^[ \t\r\n]+|[ \t\r\n]+$/, "", s);
    return s;
}

function printRecordAndCount(record, count) {
    print record;
    if (count > 1) {
        printf("... repeated %d times\n", count)
    }
}

BEGIN {
    # Before processing, set the record separator to the ASCII record separator character \x1e
    RS = "\x1e";
}

# This action is executed for each record
{
    # Build our current var from the trimmed record
    current = trim($0);

    # Bump the count of times we have seen it
    seen[current]++;

    # Print the previous record and its count (if it is not identical to the current record)
    if (previous && previous != current) {
        printRecordAndCount(previous, seen[previous]);
    }

    # Store the current record as the previous record
    previous = current;
}

END {
    # After processing, print the last record and count if it is non-empty
    if (previous) {
        printRecordAndCount(previous, seen[previous]);
    }
}
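
# Example (illustrative): given attempts separated by \x1e where the first
# three attempts print "waiting" and the last prints "ready", the condensed
# timeline emitted by this script would read:
#
#   waiting
#   ... repeated 3 times
#   ready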
155
hack/lib/constants.sh
Executable file
@@ -0,0 +1,155 @@
#!/bin/bash

# This script provides constants for the Golang binary build process

readonly OS_GO_PACKAGE=github.com/openshift/image-registry

readonly OS_BUILD_ENV_GOLANG="${OS_BUILD_ENV_GOLANG:-1.8}"
readonly OS_BUILD_ENV_IMAGE="${OS_BUILD_ENV_IMAGE:-openshift/origin-release:golang-${OS_BUILD_ENV_GOLANG}}"
readonly OS_REQUIRED_GO_VERSION="go1.8"
readonly OS_BUILD_ENV_WORKINGDIR="/go/${OS_GO_PACKAGE}"

readonly OS_OUTPUT_BASEPATH="${OS_OUTPUT_BASEPATH:-_output}"
readonly OS_BASE_OUTPUT="${OS_ROOT}/${OS_OUTPUT_BASEPATH}"
readonly OS_OUTPUT_SCRIPTPATH="${OS_OUTPUT_SCRIPTPATH:-"${OS_BASE_OUTPUT}/scripts"}"

readonly OS_OUTPUT_SUBPATH="${OS_OUTPUT_SUBPATH:-${OS_OUTPUT_BASEPATH}/local}"
readonly OS_OUTPUT="${OS_ROOT}/${OS_OUTPUT_SUBPATH}"
readonly OS_OUTPUT_RELEASEPATH="${OS_OUTPUT}/releases"
readonly OS_OUTPUT_RPMPATH="${OS_OUTPUT_RELEASEPATH}/rpms"
readonly OS_OUTPUT_BINPATH="${OS_OUTPUT}/bin"
readonly OS_OUTPUT_PKGDIR="${OS_OUTPUT}/pkgdir"

readonly OS_GOFLAGS_TAGS="include_gcs include_oss containers_image_openpgp"

readonly OS_IMAGE_COMPILE_BINARIES=( )

readonly OS_CROSS_COMPILE_TARGETS=(
    cmd/dockerregistry
)
readonly OS_CROSS_COMPILE_BINARIES=("${OS_CROSS_COMPILE_TARGETS[@]##*/}")

readonly OS_TEST_TARGETS=( )

# os::build::get_product_vars exports variables that we expect to change
# depending on the distribution of Origin
function os::build::get_product_vars() {
    export OS_BUILD_LDFLAGS_IMAGE_PREFIX="${OS_IMAGE_PREFIX:-"openshift/origin"}"
    export OS_BUILD_LDFLAGS_DEFAULT_IMAGE_STREAMS="${OS_BUILD_LDFLAGS_DEFAULT_IMAGE_STREAMS:-"centos7"}"
}

# os::build::ldflags calculates the -ldflags argument for building OpenShift
function os::build::ldflags() {
    # Run this in a subshell to prevent settings/variables from leaking.
    set -o errexit
    set -o nounset
    set -o pipefail

    cd "${OS_ROOT}"

    os::build::version::get_vars
    os::build::get_product_vars

    local buildDate="$(date -u +'%Y-%m-%dT%H:%M:%SZ')"

    declare -a ldflags=()

    ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/pkg/oc/bootstrap/docker.defaultImageStreams" "${OS_BUILD_LDFLAGS_DEFAULT_IMAGE_STREAMS}"))
    ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/pkg/cmd/util/variable.DefaultImagePrefix" "${OS_BUILD_LDFLAGS_IMAGE_PREFIX}"))
    ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/pkg/version.majorFromGit" "${OS_GIT_MAJOR}"))
    ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/pkg/version.minorFromGit" "${OS_GIT_MINOR}"))
    ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/pkg/version.versionFromGit" "${OS_GIT_VERSION}"))
    ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/pkg/version.commitFromGit" "${OS_GIT_COMMIT}"))
    ldflags+=($(os::build::ldflag "${OS_GO_PACKAGE}/pkg/version.buildDate" "${buildDate}"))

    # The -ldflags parameter takes a single string, so join the output.
    echo "${ldflags[*]-}"
}
readonly -f os::build::ldflags
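
# Example (illustrative): callers are expected to invoke this through a
# command substitution, which is itself the subshell that the comment above
# relies on to keep the `set -o` settings from leaking:
#
#   go build -ldflags "$( os::build::ldflags )" "${OS_GO_PACKAGE}/cmd/dockerregistry"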

# No-op
function os::build::generate_windows_versioninfo() {
    :
}
readonly -f os::build::generate_windows_versioninfo

function os::build::clean_windows_versioninfo() {
    :
}
readonly -f os::build::clean_windows_versioninfo

# os::util::list_go_src_files lists files we consider part of our project
# source code, useful for tools that iterate over source to provide
# vetting or linting, etc.
#
# Globals:
#  None
# Arguments:
#  None
# Returns:
#  None
function os::util::list_go_src_files() {
    find . -not \( \
        \( \
            -wholename './_output' \
            -o -wholename './.*' \
            -o -wholename '*/vendor/*' \
        \) -prune \
    \) -name '*.go' | sort -u
}
readonly -f os::util::list_go_src_files

# os::util::list_go_src_dirs lists directories in the repository root and under
# cmd/, excluding doc.go, useful for tools that iterate over source to provide
# vetting or linting, or for godep-save, etc.
#
# Globals:
#  None
# Arguments:
#  None
# Returns:
#  None
function os::util::list_go_src_dirs() {
    os::util::list_go_src_files | cut -d '/' -f 1-2 | grep -v ".go$" | grep -v "^./cmd" | LC_ALL=C sort -u
    os::util::list_go_src_files | grep "^./cmd/" | cut -d '/' -f 1-3 | grep -v ".go$" | LC_ALL=C sort -u
}
readonly -f os::util::list_go_src_dirs

# os::util::list_go_deps outputs the list of dependencies for the project.
function os::util::list_go_deps() {
    go list -f '{{.ImportPath}}{{.Imports}}' ./pkg/... ./cmd/... | tr '[]' ' ' |
        sed -e "s|${OS_GO_PACKAGE}/vendor/||g"
}

# os::util::list_test_packages_under lists all packages containing Golang test files that we
# want to run as unit tests under the given base dir in the source tree
function os::util::list_test_packages_under() {
    local basedir=$*

    # we do not quote ${basedir} to allow for multiple arguments to be passed in as well as to allow for
    # arguments that use expansion, e.g. paths containing brace expansion or wildcards
    find ${basedir} -not \( \
        \( \
            -path 'vendor' \
            -o -path '*_output' \
            -o -path '*.git' \
            -o -path '*vendor/*' \
            -o -path '*test/*' \
        \) -prune \
    \) -name '*_test.go' | xargs -n1 dirname | sort -u | xargs -n1 printf "${OS_GO_PACKAGE}/%s\n"
}
readonly -f os::util::list_test_packages_under


# OS_ALL_IMAGES is the list of images built by os::build::images.
readonly OS_ALL_IMAGES=(
    openshift/origin-docker-registry
)

# os::build::images builds all images in this repo.
function os::build::images() {
    # determine the correct tag prefix
    tag_prefix="${OS_IMAGE_PREFIX:-"openshift/origin"}"
    # images that depend on "${tag_prefix}-source"
    os::build::image "${tag_prefix}-docker-registry" images/dockerregistry
}
60
hack/lib/init.sh
Normal file
@@ -0,0 +1,60 @@
#!/bin/bash

# This script is meant to be the entrypoint for OpenShift Bash scripts to import all of the support
# libraries at once in order to make Bash script preambles as minimal as possible. This script
# recursively `source`s *.sh files in this directory tree. As such, no files should be `source`ed outside
# of this script to ensure that we do not attempt to overwrite read-only variables.

set -o errexit
set -o nounset
set -o pipefail

OS_SCRIPT_START_TIME="$( date +%s )"; export OS_SCRIPT_START_TIME

# os::util::absolute_path returns the absolute path to the directory provided
function os::util::absolute_path() {
    local relative_path="$1"
    local absolute_path

    pushd "${relative_path}" >/dev/null
    relative_path="$( pwd )"
    if [[ -h "${relative_path}" ]]; then
        absolute_path="$( readlink "${relative_path}" )"
    else
        absolute_path="${relative_path}"
    fi
    popd >/dev/null

    echo "${absolute_path}"
}
readonly -f os::util::absolute_path

# find the absolute path to the root of the Origin source tree
init_source="$( dirname "${BASH_SOURCE}" )/../.."
OS_ROOT="$( os::util::absolute_path "${init_source}" )"
export OS_ROOT
cd "${OS_ROOT}"

for library_file in $( find "${OS_ROOT}/hack/lib" -type f -name '*.sh' -not -path '*/hack/lib/init.sh' ); do
    source "${library_file}"
done

unset library_files library_file init_source

# all of our Bash scripts need to have the stacktrace
# handler installed to deal with errors
os::log::stacktrace::install

# All of our Bash scripts need to have access to the
# binaries that we build so we don't have to find
# them before every invocation.
os::util::environment::update_path_var

if [[ -z "${OS_TMP_ENV_SET-}" ]]; then
    os::util::environment::setup_tmpdir_vars "$( basename "$0" ".sh" )"
fi

# Allow setting $JUNIT_REPORT to toggle output behavior
if [[ -n "${JUNIT_REPORT:-}" ]]; then
    export JUNIT_REPORT_OUTPUT="${LOG_DIR}/raw_test_output.log"
fi
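
# Example (illustrative): a script living in hack/, such as hack/build-go.sh,
# would pull in every library above with a single preamble line and can then
# rely on the os::* helpers immediately:
#
#   source "$( dirname "${BASH_SOURCE}" )/lib/init.sh"
#   os::log::info "Building ${OS_GO_PACKAGE}..."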
104
hack/lib/log/output.sh
Normal file
@@ -0,0 +1,104 @@
|
||||
#!/bin/bash
|
||||
|
||||
# This file contains functions used for writing log messages
|
||||
# to stdout and stderr from scripts while they run.
|
||||
|
||||
# os::log::info writes the message to stdout.
|
||||
#
|
||||
# Arguments:
|
||||
# - all: message to write
|
||||
function os::log::info() {
|
||||
local message; message="$( os::log::internal::prefix_lines "[INFO]" "$*" )"
|
||||
os::log::internal::to_logfile "${message}"
|
||||
echo "${message}"
|
||||
}
|
||||
readonly -f os::log::info

# os::log::warning writes the message to stderr.
# A warning indicates something went wrong but
# not so wrong that we cannot recover.
#
# Arguments:
#  - all: message to write
function os::log::warning() {
    local message; message="$( os::log::internal::prefix_lines "[WARNING]" "$*" )"
    os::log::internal::to_logfile "${message}"
    os::text::print_yellow "${message}" 1>&2
}
readonly -f os::log::warning

# os::log::error writes the message to stderr.
# An error indicates that something went wrong
# and we will most likely fail after this.
#
# Arguments:
#  - all: message to write
function os::log::error() {
    local message; message="$( os::log::internal::prefix_lines "[ERROR]" "$*" )"
    os::log::internal::to_logfile "${message}"
    os::text::print_red "${message}" 1>&2
}
readonly -f os::log::error

# os::log::fatal writes the message to stderr and
# exits with a non-zero code to force a process exit.
# A fatal error indicates that there is no chance
# of recovery.
#
# Arguments:
#  - all: message to write
function os::log::fatal() {
    local message; message="$( os::log::internal::prefix_lines "[FATAL]" "$*" )"
    os::log::internal::to_logfile "${message}"
    os::text::print_red "${message}" 1>&2
    exit 1
}
readonly -f os::log::fatal

# os::log::debug writes the message to stderr if
# the ${OS_DEBUG} variable is set.
#
# Globals:
#  - OS_DEBUG
# Arguments:
#  - all: message to write
function os::log::debug() {
    local message; message="$( os::log::internal::prefix_lines "[DEBUG]" "$*" )"
    os::log::internal::to_logfile "${message}"
    if [[ -n "${OS_DEBUG:-}" ]]; then
        os::text::print_blue "${message}" 1>&2
    fi
}
readonly -f os::log::debug
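
# Example (illustrative): debug output is suppressed unless OS_DEBUG is set,
# so callers can leave verbose tracing in place without cluttering CI logs:
#
#   os::log::debug "cache miss"                # no terminal output
#   OS_DEBUG=true os::log::debug "cache miss"  # stderr, in blue: [DEBUG] cache miss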

# os::log::internal::to_logfile makes a best-effort
# attempt to write the message to the script logfile.
#
# Globals:
#  - LOG_DIR
# Arguments:
#  - all: message to write
function os::log::internal::to_logfile() {
    if [[ -n "${LOG_DIR:-}" && -d "${LOG_DIR:-}" ]]; then
        echo "$*" >>"${LOG_DIR}/scripts.log"
    fi
}
readonly -f os::log::internal::to_logfile

# os::log::internal::prefix_lines prints out the
# original content with the given prefix at the
# start of every line.
#
# Arguments:
#  - 1: prefix for lines
#  - 2: content to prefix
function os::log::internal::prefix_lines() {
    local prefix="$1"
    local content="$2"

    local old_ifs="${IFS}"
    IFS=$'\n'
    for line in ${content}; do
        echo "${prefix} ${line}"
    done
    IFS="${old_ifs}"
}
readonly -f os::log::internal::prefix_lines
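
# Example (illustrative): prefixing is applied per line, so multi-line
# messages stay readable:
#
#   os::log::internal::prefix_lines "[INFO]" $'first\nsecond'
#   # -> [INFO] first
#   # -> [INFO] second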

94
hack/lib/log/stacktrace.sh
Normal file
@@ -0,0 +1,94 @@
#!/bin/bash
#
# This library contains an implementation of a stack trace for Bash scripts.

# os::log::stacktrace::install installs the stacktrace as a handler for the ERR signal if one
# has not already been installed and sets `set -o errtrace` in order to propagate the handler.
# If the ERR trap is not initialized, installing this plugin will initialize it.
#
# Globals:
#  None
# Arguments:
#  None
# Returns:
#  - export OS_USE_STACKTRACE
function os::log::stacktrace::install() {
    # setting 'errtrace' propagates our ERR handler to functions, expansions and subshells
    set -o errtrace

    # OS_USE_STACKTRACE is read by os::util::trap at runtime to request a stacktrace
    export OS_USE_STACKTRACE=true

    os::util::trap::init_err
}
readonly -f os::log::stacktrace::install
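
# Example (illustrative): scripts that source hack/lib/init.sh get this handler
# automatically; once installed, any failing command under 'set -o errexit'
# prints a stack trace instead of exiting silently:
#
#   set -o errexit
#   os::log::stacktrace::install
#   false   # triggers ERR -> os::log::stacktrace::print, which exits with 1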

# os::log::stacktrace::print prints the stacktrace and exits with the return code from the script that
# called for a stack trace. This function will always return 0 if it is not handling the signal, and if it
# is handling the signal, this function will always `exit`, not return, the return code it receives as
# its first argument.
#
# Globals:
#  - BASH_SOURCE
#  - BASH_LINENO
#  - FUNCNAME
# Arguments:
#  - 1: the return code of the command in the script that generated the ERR signal
#  - 2: the last command that ran before handlers were invoked
#  - 3: whether or not `set -o errexit` was set in the script that generated the ERR signal
# Returns:
#  None
function os::log::stacktrace::print() {
    local return_code=$1
    local last_command=$2
    local errexit_set=${3:-}

    if [[ "${return_code}" = "0" ]]; then
        # we're not supposed to respond when no error has occurred
        return 0
    fi

    if [[ -z "${errexit_set}" ]]; then
        # if errexit wasn't set in the shell when the ERR signal was issued, then we can ignore the signal
        # as this is not cause for failure
        return 0
    fi

    # dump the entire stack for debugging purposes
    os::log::debug "$( os::util::repository_relative_path "${BASH_SOURCE[0]}:${LINENO}: ${BASH_COMMAND}" )"
    for (( i = 0; i < ${#BASH_LINENO[@]}; i++ )); do
        os::log::debug "$( os::util::repository_relative_path "${BASH_SOURCE[$i+1]:-"$( os::util::repository_relative_path "$0" )"}" ):${BASH_LINENO[$i]}: ${FUNCNAME[$i]}"
    done

    # iterate backwards through the stack until we leave library files, so we can be sure we start logging
    # actual script code and not this handler's call
    local stack_begin_index
    for (( stack_begin_index = 0; stack_begin_index < ${#BASH_SOURCE[@]}; stack_begin_index++ )); do
        if [[ ! "${BASH_SOURCE[${stack_begin_index}]}" =~ hack/lib/(log/stacktrace|util/trap)\.sh ]]; then
            break
        fi
    done

    local preamble_finished
    local stack_index=1
    local i
    for (( i = stack_begin_index; i < ${#BASH_SOURCE[@]}; i++ )); do
        local bash_source
        bash_source="$( os::util::repository_relative_path "${BASH_SOURCE[$i]}" )"
        if [[ -z "${preamble_finished:-}" ]]; then
            preamble_finished=true
            os::log::error "PID ${BASHPID:-$$}: ${bash_source}:${BASH_LINENO[$i-1]}: \`${last_command}\` exited with status ${return_code}." >&2
            os::log::info $'\t\t'"Stack Trace: " >&2
            os::log::info $'\t\t'" ${stack_index}: ${bash_source}:${BASH_LINENO[$i-1]}: \`${last_command}\`" >&2
        else
            os::log::info $'\t\t'" ${stack_index}: ${bash_source}:${BASH_LINENO[$i-1]}: ${FUNCNAME[$i-1]}" >&2
        fi
        stack_index=$(( stack_index + 1 ))
    done

    # we know we're the privileged handler in this chain, so we can safely exit the shell without
    # starving another handler of the privilege of reacting to this signal
    os::log::info " Exiting with code ${return_code}." >&2
    exit "${return_code}"
}
readonly -f os::log::stacktrace::print

243
hack/lib/log/system.sh
Normal file
@@ -0,0 +1,243 @@
#!/bin/bash
#
# This library holds all of the system logging functions for OpenShift bash scripts.

# os::log::system::install_cleanup installs os::log::system::clean_up as a trap on exit.
# If any traps are currently set for these signals, os::log::system::clean_up is prefixed.
#
# Globals:
#  None
# Arguments:
#  None
# Returns:
#  None
function os::log::system::install_cleanup() {
    trap "os::log::system::clean_up; $(trap -p EXIT | awk -F"'" '{print $2}')" EXIT
}
readonly -f os::log::system::install_cleanup

# os::log::system::clean_up should be trapped so that it can stop the logging utility once the script that
# installed it is finished.
# This function stops logging and generates plots of data for easy consumption.
#
# Globals:
#  - LOG_DIR
#  - LOGGER_PID
#  - SAR_LOGFILE
# Arguments:
#  None
# Returns:
#  None
function os::log::system::clean_up() {
    local return_code=$?

    # we don't want failures in this logger to cause failures in the parent
    # script, so we can let these commands fail silently
    set +o errexit

    if jobs -pr | grep -q "${LOGGER_PID}"; then
        kill -SIGTERM "${LOGGER_PID}"
        # give logger ten seconds to gracefully exit before killing it
        for (( i = 0; i < 10; i++ )); do
            if ! jobs -pr | grep -q "${LOGGER_PID}"; then
                # the logger has shutdown, we don't need to wait on it any longer
                break
            fi
            sleep 1 # poll once per second so the loop honors the ten-second grace period
        done

        if jobs -pr | grep -q "${LOGGER_PID}"; then
            # the logger has not shutdown, so kill it
            kill -SIGKILL "${LOGGER_PID}"
        fi
    fi

    if ! which sadf >/dev/null 2>&1; then
        os::log::warning "System logger data could not be unpacked and graphed, 'sadf' binary not found in this environment."
        return 0
    fi

    if [[ ! -s "${SAR_LOGFILE:-}" ]]; then
        os::log::warning "No system logger data could be found, log file missing."
        return 0
    fi

    local log_subset_flags=( "-b" "-B" "-u ALL" "-q" "-r" )
    local log_subset_names=( "iops" "paging" "cpu" "queue" "memory" )

    local log_subset_file
    local i
    for (( i = 0; i < "${#log_subset_flags[@]}"; i++ )); do
        log_subset_file="${LOG_DIR}/${log_subset_names[$i]}.txt"
        # use the sadf utility to extract data into an easily-parseable format
        sadf -d "${SAR_LOGFILE}" -- ${log_subset_flags[$i]} > "${log_subset_file}"

        local ignored_columns="hostname,interval,"

        # special cases for special output from SAR, because the tool often gives us columns full of unusable data
        if [[ "${log_subset_names[$i]}" == "cpu" ]]; then
            ignored_columns="${ignored_columns}CPU,"
        fi

        os::log::system::internal::prune_datafile "${log_subset_file}" "${ignored_columns}"
        os::log::system::internal::plot "${log_subset_file}"
    done

    # remove the `sar` log file for space constraints
    rm -f "${SAR_LOGFILE}"

    return "${return_code}"
}
readonly -f os::log::system::clean_up

# os::log::system::internal::prune_datafile removes the given columns from a datafile created by 'sadf -d'
#
# Globals:
#  None
# Arguments:
#  - 1: datafile
#  - 2: comma-delimited columns to remove, with trailing comma
# Returns:
#  None
function os::log::system::internal::prune_datafile() {
    local datafile=$1
    local column_names=$2

    if [[ "${#column_names}" -eq 0 ]]; then
        return 0
    fi

    local columns_in_order
    columns_in_order=( $( head -n 1 "${datafile}" | sed 's/^# //g' | tr ';' ' ' ) )

    local columns_to_keep
    local i
    for (( i = 0; i < "${#columns_in_order[@]}"; i++ )); do
        if ! echo "${column_names}" | grep -q "${columns_in_order[$i]},"; then
            # this is a column we need to keep, adding one as 'cut' is 1-indexed
            columns_to_keep+=( "$(( i + 1 ))" )
        fi
    done

    # for the proper flag format for 'cut', we join the list delimiting with commas
    columns_to_keep="$( IFS=','; echo "${columns_to_keep[*]}" )"

    cut --delimiter=';' -f"${columns_to_keep}" "${datafile}" > "${datafile}.tmp"
    sed -i '1s/^/# /' "${datafile}.tmp"
    mv "${datafile}.tmp" "${datafile}"
}
readonly -f os::log::system::internal::prune_datafile
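
# Example (illustrative): given a 'sadf -d' file whose header line is
#   # hostname;interval;timestamp;%user;%nice
# pruning "hostname,interval," keeps only the timestamp and CPU columns:
#
#   os::log::system::internal::prune_datafile "${LOG_DIR}/cpu.txt" "hostname,interval,"
#   # header afterwards: # timestamp;%user;%nice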

# os::log::system::internal::plot uses gnuplot to make a plot of some data across time points. This function is intended to be used
# on the output of a 'sadf -d' read of a sar binary file. Plots will be made of all columns and stacked on each other with one x axis.
# This function needs the non-data columns of the file to be prefixed with comments.
#
# Globals:
#  - LOG_DIR
# Arguments:
#  - 1: data file
# Returns:
#  None
function os::log::system::internal::plot() {
    local datafile=$1
    local plotname
    plotname="$(basename "${datafile}" .txt)"

    # we are expecting the output of a 'sadf -d' read, so the headers will be on the first line of the file
    local headers
    headers=( $( head -n 1 "${datafile}" | sed 's/^# //g' | tr ';' ' ' ) )

    local records
    local width
    records="$(( $( wc -l < "${datafile}" ) - 1 ))" # one of these lines will be the header comment
    if [[ "${records}" -gt 90 ]]; then
        width="$(echo "8.5 + ${records}*0.025" | bc )"
    else
        width="8.5"
    fi

    local gnuplot_directive=( "set terminal pdf size ${width}in,$(( 2 * (${#headers[@]} - 1) ))in" \
                              "set output \"${LOG_DIR}/${plotname}.pdf\"" \
                              "set datafile separator \";\"" \
                              "set xdata time" \
                              "set timefmt '%Y-%m-%d %H:%M:%S UTC'" \
                              "set tmargin 1" \
                              "set bmargin 1" \
                              "set lmargin 20" \
                              "set rmargin 20" \
                              "set multiplot layout ${#headers[@]},1 title \"\n${plotname}\n\"" \
                              "unset title" )

    local i
    for (( i = 1; i < "${#headers[@]}"; i++ )); do
        local header
        header="${headers[$i]}"

        if (( i == ${#headers[@]} - 1 )); then
            # we need x-tick labels on the bottom plot
            gnuplot_directive+=( "set xtics format '%H:%M:%S' rotate by -90" )
        else
            gnuplot_directive+=( "set format x ''" )
        fi

        gnuplot_directive+=( "plot \"${datafile}\" using 1:$(( i + 1 )) title \"${header}\" with lines" )
    done

    # concatenate the array with newlines to get the final directive to send to gnuplot
    gnuplot_directive="$( IFS=$'\n'; echo "${gnuplot_directive[*]}" )"

    {
        printf '$ gnuplot <<< %s\n' "${gnuplot_directive}"
        gnuplot <<< "${gnuplot_directive}" 2>&1
        printf '\n\n'
    } >> "${LOG_DIR}/gnuplot.log"

    os::log::debug "Stacked plot for log subset \"${plotname}\" written to ${LOG_DIR}/${plotname}.pdf"
}
readonly -f os::log::system::internal::plot

# os::log::system::start installs the system logger and begins logging
#
# Globals:
#  - LOG_DIR
# Arguments:
#  None
# Returns:
#  - export LOGGER_PID
#  - export SAR_LOGFILE
function os::log::system::start() {
    if ! which sar >/dev/null 2>&1; then
        os::log::debug "System logger could not be started, 'sar' binary not found in this environment."
        return 0
    fi

    readonly SAR_LOGFILE="${LOG_DIR}/sar.log"
    export SAR_LOGFILE

    os::log::system::internal::run "${SAR_LOGFILE}" "${LOG_DIR}/sar_stderr.log"

    os::log::system::install_cleanup
}
readonly -f os::log::system::start
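
# Example (illustrative): a long-running test script records system metrics for
# its whole lifetime with a single call; the EXIT trap stops 'sar' and renders
# the plots into ${LOG_DIR} when the script finishes:
#
#   os::log::system::start
#   run_expensive_test_suite   # hypothetical workload, named here only for illustration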

# os::log::system::internal::run runs the system logger in the background.
# 'sar' is configured to run once a second for 24 hours, so the cleanup trap should be installed to ensure that
# the process is killed once the parent script is finished.
#
# Globals:
#  None
# Arguments:
#  - 1: file to log binary output to
#  - 2: file to log stderr of the logger to
# Returns:
#  None
function os::log::system::internal::run() {
    local binary_logfile=$1
    local stderr_logfile=$2

    sar -A -o "${binary_logfile}" 1 86400 1>/dev/null 2>"${stderr_logfile}" &

    LOGGER_PID=$!
    readonly LOGGER_PID
    export LOGGER_PID
}
readonly -f os::log::system::internal::run

204
hack/lib/test/junit.sh
Normal file
@@ -0,0 +1,204 @@
#!/bin/bash
# This utility file contains functions that format test output to be parsed into jUnit XML

# os::test::junit::declare_suite_start prints a message declaring the start of a test suite
# Any number of suites can be in flight at any time, so there is no failure condition for this
# script based on the number of suites in flight.
#
# Globals:
#  - JUNIT_REPORT_OUTPUT
#  - NUM_OS_JUNIT_SUITES_IN_FLIGHT
# Arguments:
#  - 1: the suite name that is starting
# Returns:
#  - increment NUM_OS_JUNIT_SUITES_IN_FLIGHT
function os::test::junit::declare_suite_start() {
    local suite_name=$1
    local num_suites=${NUM_OS_JUNIT_SUITES_IN_FLIGHT:-0}

    echo "=== BEGIN TEST SUITE github.com/openshift/origin/test/${suite_name} ===" >> "${JUNIT_REPORT_OUTPUT:-/dev/null}"
    NUM_OS_JUNIT_SUITES_IN_FLIGHT=$(( ${num_suites} + 1 ))
    export NUM_OS_JUNIT_SUITES_IN_FLIGHT
}
readonly -f os::test::junit::declare_suite_start

# os::test::junit::declare_suite_end prints a message declaring the end of a test suite
# If there aren't any suites in flight, this function will fail.
#
# Globals:
#  - JUNIT_REPORT_OUTPUT
#  - NUM_OS_JUNIT_SUITES_IN_FLIGHT
# Arguments:
#  None
# Returns:
#  - export/decrement NUM_OS_JUNIT_SUITES_IN_FLIGHT
function os::test::junit::declare_suite_end() {
    local num_suites=${NUM_OS_JUNIT_SUITES_IN_FLIGHT:-0}
    if [[ "${num_suites}" -lt "1" ]]; then
        # we can't end a suite if none have been started yet
        echo "[ERROR] jUnit suite marker could not be placed, expected suites in flight, got ${num_suites}"
        return 1
    fi

    echo "=== END TEST SUITE ===" >> "${JUNIT_REPORT_OUTPUT:-/dev/null}"
    NUM_OS_JUNIT_SUITES_IN_FLIGHT=$(( ${num_suites} - 1 ))
    export NUM_OS_JUNIT_SUITES_IN_FLIGHT
}
readonly -f os::test::junit::declare_suite_end
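
# Example (illustrative): suite and test markers nest, and every start needs a
# matching end so the report parser sees a balanced stream:
#
#   os::test::junit::declare_suite_start "cmd/images"
#   os::test::junit::declare_test_start
#   # ... run one test case, capturing its output ...
#   os::test::junit::declare_test_end
#   os::test::junit::declare_suite_end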

# os::test::junit::declare_test_start prints a message declaring the start of a test case
# If there is already a test marked as being in flight, this function will fail.
#
# Globals:
#  - JUNIT_REPORT_OUTPUT
#  - NUM_OS_JUNIT_TESTS_IN_FLIGHT
# Arguments:
#  None
# Returns:
#  - increment NUM_OS_JUNIT_TESTS_IN_FLIGHT
function os::test::junit::declare_test_start() {
    local num_tests=${NUM_OS_JUNIT_TESTS_IN_FLIGHT:-0}
    if [[ "${num_tests}" -ne "0" ]]; then
        # someone's declaring the start of a test when a test is already in flight
        echo "[ERROR] jUnit test marker could not be placed, expected no tests in flight, got ${num_tests}"
        return 1
    fi

    local num_suites=${NUM_OS_JUNIT_SUITES_IN_FLIGHT:-0}
    if [[ "${num_suites}" -lt "1" ]]; then
        # we can't start a test if no suites are in flight
        echo "[ERROR] jUnit test marker could not be placed, expected suites in flight, got ${num_suites}"
        return 1
    fi

    echo "=== BEGIN TEST CASE ===" >> "${JUNIT_REPORT_OUTPUT:-/dev/null}"
    NUM_OS_JUNIT_TESTS_IN_FLIGHT=$(( ${num_tests} + 1 ))
    export NUM_OS_JUNIT_TESTS_IN_FLIGHT
}
readonly -f os::test::junit::declare_test_start

# os::test::junit::declare_test_end prints a message declaring the end of a test case
# If there is no test marked as being in flight, this function will fail.
#
# Globals:
#  - JUNIT_REPORT_OUTPUT
#  - NUM_OS_JUNIT_TESTS_IN_FLIGHT
# Arguments:
#  None
# Returns:
#  - decrement NUM_OS_JUNIT_TESTS_IN_FLIGHT
function os::test::junit::declare_test_end() {
    local num_tests=${NUM_OS_JUNIT_TESTS_IN_FLIGHT:-0}
    if [[ "${num_tests}" -ne "1" ]]; then
        # someone's declaring the end of a test when a test is not in flight
        echo "[ERROR] jUnit test marker could not be placed, expected one test in flight, got ${num_tests}"
        return 1
    fi

    echo "=== END TEST CASE ===" >> "${JUNIT_REPORT_OUTPUT:-/dev/null}"
    NUM_OS_JUNIT_TESTS_IN_FLIGHT=$(( ${num_tests} - 1 ))
    export NUM_OS_JUNIT_TESTS_IN_FLIGHT
}
readonly -f os::test::junit::declare_test_end

# os::test::junit::check_test_counters checks that we do not have any test suites or test cases in flight
# This function should be called at the very end of any test script using jUnit markers to make sure no error in
# marking has occurred.
#
# Globals:
#  - NUM_OS_JUNIT_SUITES_IN_FLIGHT
#  - NUM_OS_JUNIT_TESTS_IN_FLIGHT
# Arguments:
#  None
# Returns:
#  None
function os::test::junit::check_test_counters() {
    if [[ "${NUM_OS_JUNIT_SUITES_IN_FLIGHT-}" -ne "0" ]]; then
        echo "[ERROR] Expected no test suites to be marked as in-flight at the end of testing, got ${NUM_OS_JUNIT_SUITES_IN_FLIGHT-}"
        return 1
    elif [[ "${NUM_OS_JUNIT_TESTS_IN_FLIGHT-}" -ne "0" ]]; then
        echo "[ERROR] Expected no test cases to be marked as in-flight at the end of testing, got ${NUM_OS_JUNIT_TESTS_IN_FLIGHT-}"
        return 1
    fi
}
readonly -f os::test::junit::check_test_counters

# os::test::junit::reconcile_output appends the necessary suite and test end statements to the jUnit output file
# in order to ensure that the file is in a consistent state to allow for parsing
#
# Globals:
#  - NUM_OS_JUNIT_SUITES_IN_FLIGHT
#  - NUM_OS_JUNIT_TESTS_IN_FLIGHT
# Arguments:
#  None
# Returns:
#  None
function os::test::junit::reconcile_output() {
    if [[ "${NUM_OS_JUNIT_TESTS_IN_FLIGHT:-0}" = "1" ]]; then
        os::test::junit::declare_test_end
    fi

    for (( i = 0; i < ${NUM_OS_JUNIT_SUITES_IN_FLIGHT:-0}; i++ )); do
        os::test::junit::declare_suite_end
    done
}
readonly -f os::test::junit::reconcile_output

# os::test::junit::generate_report determines which type of report is to
# be generated and does so from the raw output of the tests.
#
# Globals:
#  - JUNIT_REPORT_OUTPUT
#  - ARTIFACT_DIR
# Arguments:
#  None
# Returns:
#  None
function os::test::junit::generate_report() {
    if [[ -z "${JUNIT_REPORT_OUTPUT:-}" ||
          ( -n "${JUNIT_REPORT_OUTPUT:-}" && ! -s "${JUNIT_REPORT_OUTPUT:-}" ) ]]; then
        # we can't generate a report
        return
    fi

    if grep -q "=== END TEST CASE ===" "${JUNIT_REPORT_OUTPUT}"; then
        os::test::junit::reconcile_output
        os::test::junit::check_test_counters
        os::test::junit::internal::generate_report "oscmd"
    else
        os::test::junit::internal::generate_report "gotest"
    fi
}
readonly -f os::test::junit::generate_report

# os::test::junit::internal::generate_report generates an XML jUnit
# report for either `os::cmd` or `go test`, based on the passed
# argument. If the `junitreport` binary is not present, it will be built.
#
# Globals:
#  - JUNIT_REPORT_OUTPUT
#  - ARTIFACT_DIR
# Arguments:
#  - 1: the type of test output junitreport should read ('oscmd' or 'gotest')
# Returns:
#  export JUNIT_REPORT_NUM_FAILED
function os::test::junit::internal::generate_report() {
    local report_type="$1"
    os::util::ensure::built_binary_exists 'junitreport'

    local report_file
    report_file="$( mktemp "${ARTIFACT_DIR}/${report_type}_report_XXXXX" ).xml"
    os::log::info "jUnit XML report placed at $( os::util::repository_relative_path "${report_file}" )"
    junitreport --type "${report_type}" \
                --suites nested \
                --roots github.com/openshift/origin \
                --output "${report_file}" \
                <"${JUNIT_REPORT_OUTPUT}"

    local summary
    summary=$( junitreport summarize <"${report_file}" )

    JUNIT_REPORT_NUM_FAILED="$( grep -oE "[0-9]+ failed" <<<"${summary}" )"
    export JUNIT_REPORT_NUM_FAILED

    echo "${summary}"
}
readonly -f os::test::junit::internal::generate_report

95
hack/lib/util/ensure.sh
Normal file
@@ -0,0 +1,95 @@
#!/bin/bash

# This script contains helper functions for ensuring that dependencies
# exist on a host system that are required to run Origin scripts.

# os::util::ensure::system_binary_exists ensures that the
# given binary exists on the system in the $PATH.
#
# Globals:
#  None
# Arguments:
#  - 1: binary to search for
# Returns:
#  None
function os::util::ensure::system_binary_exists() {
    local binary="$1"

    if ! os::util::find::system_binary "${binary}" >/dev/null 2>&1; then
        os::log::fatal "Required \`${binary}\` binary was not found in \$PATH."
    fi
}
readonly -f os::util::ensure::system_binary_exists
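
# Example (illustrative): guard a script on its external dependencies up front
# so it fails fast with a clear message instead of partway through:
#
#   os::util::ensure::system_binary_exists 'git'
#   os::util::ensure::system_binary_exists 'docker'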

# os::util::ensure::built_binary_exists ensures that the
# given binary exists on the system in the local output
# directory for the current platform. If it doesn't, we
# will attempt to build it if we can determine the correct
# hack/build-go.sh target for the binary.
#
# This function will attempt to determine the correct
# hack/build-go.sh target for the binary, but may not
# be able to do so if the target doesn't live under
# cmd/ or tools/. In that case, one should be given.
#
# Globals:
#  - OS_ROOT
# Arguments:
#  - 1: binary to search for
#  - 2: optional build target for this binary
# Returns:
#  None
function os::util::ensure::built_binary_exists() {
    local binary="$1"
    local target="${2:-}"

    if ! os::util::find::built_binary "${binary}" >/dev/null 2>&1; then
        if [[ -z "${target}" ]]; then
            if [[ -d "${OS_ROOT}/cmd/${binary}" ]]; then
                target="cmd/${binary}"
            elif [[ -d "${OS_ROOT}/tools/${binary}" ]]; then
                target="tools/${binary}"
            elif [[ -d "${OS_ROOT}/tools/rebasehelpers/${binary}" ]]; then
                target="tools/rebasehelpers/${binary}"
            fi
        fi

        if [[ -n "${target}" ]]; then
            os::log::info "No compiled \`${binary}\` binary was found. Attempting to build one using:
  $ hack/build-go.sh ${target}"
            "${OS_ROOT}/hack/build-go.sh" "${target}"
        else
            os::log::fatal "No compiled \`${binary}\` binary was found and no build target could be determined.
Provide the binary and try running $0 again."
        fi
    fi
}
readonly -f os::util::ensure::built_binary_exists

# os::util::ensure::gopath_binary_exists ensures that the
# given binary exists on the system in $GOPATH. If it
# doesn't, we will attempt to install it if we can determine
# the correct install path for the binary.
#
# Globals:
#  - GOPATH
# Arguments:
#  - 1: binary to search for
#  - 2: [optional] path to install from
# Returns:
#  None
function os::util::ensure::gopath_binary_exists() {
    local binary="$1"
    local install_path="${2:-}"

    if ! os::util::find::gopath_binary "${binary}" >/dev/null 2>&1; then
        if [[ -n "${install_path:-}" ]]; then
            os::log::info "No installed \`${binary}\` was found in \$GOPATH. Attempting to install using:
  $ go get ${install_path}"
            go get "${install_path}"
        else
            os::log::fatal "Required \`${binary}\` binary was not found in \$GOPATH."
        fi
    fi
}
readonly -f os::util::ensure::gopath_binary_exists
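
# Example (illustrative; the import path below is only a placeholder): install
# a Go tool on demand the first time a script needs it:
#
#   os::util::ensure::gopath_binary_exists 'golint' 'github.com/golang/lint/golint'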

289
hack/lib/util/environment.sh
Normal file
@@ -0,0 +1,289 @@
#!/bin/bash

# This script holds library functions for setting up the shell environment for OpenShift scripts

# os::util::environment::use_sudo updates $USE_SUDO to be 'true', so that later scripts choosing between
# execution using 'sudo' and execution without it choose to use 'sudo'
#
# Globals:
#  None
# Arguments:
#  None
# Returns:
#  - export USE_SUDO
function os::util::environment::use_sudo() {
    USE_SUDO=true
    export USE_SUDO
}
readonly -f os::util::environment::use_sudo

# os::util::environment::setup_time_vars sets up environment variables that describe durations of time
# These variables can be used to specify times for other utility functions
#
# Globals:
#  None
# Arguments:
#  None
# Returns:
#  - export TIME_MS
#  - export TIME_SEC
#  - export TIME_MIN
function os::util::environment::setup_time_vars() {
    TIME_MS=1
    export TIME_MS
    TIME_SEC="$(( 1000 * ${TIME_MS} ))"
    export TIME_SEC
    TIME_MIN="$(( 60 * ${TIME_SEC} ))"
    export TIME_MIN
}
readonly -f os::util::environment::setup_time_vars
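
# Example (illustrative): the base unit is milliseconds, so durations compose
# by multiplication, e.g. a 30-second timeout for some waiting helper:
#
#   os::util::environment::setup_time_vars
#   timeout="$(( 30 * TIME_SEC ))"   # 30000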

# os::util::environment::setup_all_server_vars sets up all environment variables necessary to configure and start an OpenShift server
#
# Globals:
#  - OS_ROOT
#  - PATH
#  - TMPDIR
#  - LOG_DIR
#  - ARTIFACT_DIR
#  - KUBELET_SCHEME
#  - KUBELET_BIND_HOST
#  - KUBELET_HOST
#  - KUBELET_PORT
#  - BASETMPDIR
#  - ETCD_PORT
#  - ETCD_PEER_PORT
#  - API_BIND_HOST
#  - API_HOST
#  - API_PORT
#  - API_SCHEME
#  - PUBLIC_MASTER_HOST
#  - USE_IMAGES
# Arguments:
#  None
# Returns:
#  - export PATH
#  - export BASETMPDIR
#  - export LOG_DIR
#  - export VOLUME_DIR
#  - export ARTIFACT_DIR
#  - export FAKE_HOME_DIR
#  - export KUBELET_SCHEME
#  - export KUBELET_BIND_HOST
#  - export KUBELET_HOST
#  - export KUBELET_PORT
#  - export ETCD_PORT
#  - export ETCD_PEER_PORT
#  - export ETCD_DATA_DIR
#  - export API_BIND_HOST
#  - export API_HOST
#  - export API_PORT
#  - export API_SCHEME
#  - export SERVER_CONFIG_DIR
#  - export MASTER_CONFIG_DIR
#  - export NODE_CONFIG_DIR
#  - export USE_IMAGES
#  - export TAG
function os::util::environment::setup_all_server_vars() {
    os::util::environment::setup_kubelet_vars
    os::util::environment::setup_etcd_vars
    os::util::environment::setup_server_vars
    os::util::environment::setup_images_vars
}
readonly -f os::util::environment::setup_all_server_vars

# os::util::environment::update_path_var updates $PATH so that OpenShift binaries are available
#
# Globals:
#  - OS_ROOT
#  - PATH
# Arguments:
#  None
# Returns:
#  - export PATH
function os::util::environment::update_path_var() {
    local prefix
    if os::util::find::system_binary 'go' >/dev/null 2>&1; then
        prefix+="${OS_OUTPUT_BINPATH}/$(os::build::host_platform):"
    fi
    if [[ -n "${GOPATH:-}" ]]; then
        prefix+="${GOPATH}/bin:"
    fi

    PATH="${prefix:-}${PATH}"
    export PATH
}
readonly -f os::util::environment::update_path_var

# os::util::environment::setup_tmpdir_vars sets up temporary directory path variables
#
# Globals:
#  - TMPDIR
# Arguments:
#  - 1: the path under the root temporary directory for OpenShift where these subdirectories should be made
# Returns:
#  - export BASETMPDIR
#  - export BASEOUTDIR
#  - export LOG_DIR
#  - export VOLUME_DIR
#  - export ARTIFACT_DIR
#  - export FAKE_HOME_DIR
#  - export OS_TMP_ENV_SET
function os::util::environment::setup_tmpdir_vars() {
    local sub_dir=$1

    BASETMPDIR="${TMPDIR:-/tmp}/openshift/${sub_dir}"
    export BASETMPDIR
    VOLUME_DIR="${BASETMPDIR}/volumes"
    export VOLUME_DIR

    BASEOUTDIR="${OS_OUTPUT_SCRIPTPATH}/${sub_dir}"
    export BASEOUTDIR
    LOG_DIR="${LOG_DIR:-${BASEOUTDIR}/logs}"
    export LOG_DIR
    ARTIFACT_DIR="${ARTIFACT_DIR:-${BASEOUTDIR}/artifacts}"
    export ARTIFACT_DIR
    FAKE_HOME_DIR="${BASEOUTDIR}/openshift.local.home"
    export FAKE_HOME_DIR

    mkdir -p "${LOG_DIR}" "${VOLUME_DIR}" "${ARTIFACT_DIR}" "${FAKE_HOME_DIR}"

    export OS_TMP_ENV_SET="${sub_dir}"
}
readonly -f os::util::environment::setup_tmpdir_vars
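
# Example (illustrative): hack/lib/init.sh calls this with the sourcing
# script's basename, so a script named test-go.sh would end up with, e.g.:
#
#   BASETMPDIR=/tmp/openshift/test-go
#   LOG_DIR=${OS_OUTPUT_SCRIPTPATH}/test-go/logs
#   ARTIFACT_DIR=${OS_OUTPUT_SCRIPTPATH}/test-go/artifacts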

# os::util::environment::setup_kubelet_vars sets up environment variables necessary for interacting with the kubelet
#
# Globals:
#  - KUBELET_SCHEME
#  - KUBELET_BIND_HOST
#  - KUBELET_HOST
#  - KUBELET_PORT
# Arguments:
#  None
# Returns:
#  - export KUBELET_SCHEME
#  - export KUBELET_BIND_HOST
#  - export KUBELET_HOST
#  - export KUBELET_PORT
function os::util::environment::setup_kubelet_vars() {
    KUBELET_SCHEME="${KUBELET_SCHEME:-https}"
    export KUBELET_SCHEME
    KUBELET_BIND_HOST="${KUBELET_BIND_HOST:-$(openshift start --print-ip || echo "127.0.0.1")}"
    export KUBELET_BIND_HOST
    KUBELET_HOST="${KUBELET_HOST:-${KUBELET_BIND_HOST}}"
    export KUBELET_HOST
    KUBELET_PORT="${KUBELET_PORT:-10250}"
    export KUBELET_PORT
}
readonly -f os::util::environment::setup_kubelet_vars

# os::util::environment::setup_etcd_vars sets up environment variables necessary for interacting with etcd
#
# Globals:
#  - BASETMPDIR
#  - ETCD_HOST
#  - ETCD_PORT
#  - ETCD_PEER_PORT
# Arguments:
#  None
# Returns:
#  - export ETCD_HOST
#  - export ETCD_PORT
#  - export ETCD_PEER_PORT
#  - export ETCD_DATA_DIR
function os::util::environment::setup_etcd_vars() {
    ETCD_HOST="${ETCD_HOST:-127.0.0.1}"
    export ETCD_HOST
    ETCD_PORT="${ETCD_PORT:-4001}"
    export ETCD_PORT
    ETCD_PEER_PORT="${ETCD_PEER_PORT:-7001}"
    export ETCD_PEER_PORT

    ETCD_DATA_DIR="${BASETMPDIR}/etcd"
    export ETCD_DATA_DIR

    mkdir -p "${ETCD_DATA_DIR}"
}
readonly -f os::util::environment::setup_etcd_vars

# os::util::environment::setup_server_vars sets up environment variables necessary for interacting with the server
#
# Globals:
#  - BASETMPDIR
#  - KUBELET_HOST
#  - API_BIND_HOST
#  - API_HOST
#  - API_PORT
#  - API_SCHEME
#  - PUBLIC_MASTER_HOST
# Arguments:
#  None
# Returns:
#  - export API_BIND_HOST
#  - export API_HOST
#  - export API_PORT
#  - export API_SCHEME
#  - export SERVER_CONFIG_DIR
#  - export MASTER_CONFIG_DIR
#  - export NODE_CONFIG_DIR
function os::util::environment::setup_server_vars() {
    # turn on the cache mutation detector every time we start a server
    KUBE_CACHE_MUTATION_DETECTOR="${KUBE_CACHE_MUTATION_DETECTOR:-true}"
    export KUBE_CACHE_MUTATION_DETECTOR

    API_BIND_HOST="${API_BIND_HOST:-$(openshift start --print-ip || echo "127.0.0.1")}"
    export API_BIND_HOST
    API_HOST="${API_HOST:-${API_BIND_HOST}}"
    export API_HOST
    API_PORT="${API_PORT:-8443}"
    export API_PORT
    API_SCHEME="${API_SCHEME:-https}"
    export API_SCHEME

    MASTER_ADDR="${API_SCHEME}://${API_HOST}:${API_PORT}"
    export MASTER_ADDR
    PUBLIC_MASTER_HOST="${PUBLIC_MASTER_HOST:-${API_HOST}}"
    export PUBLIC_MASTER_HOST

    SERVER_CONFIG_DIR="${BASETMPDIR}/openshift.local.config"
    export SERVER_CONFIG_DIR
    MASTER_CONFIG_DIR="${SERVER_CONFIG_DIR}/master"
    export MASTER_CONFIG_DIR
    NODE_CONFIG_DIR="${SERVER_CONFIG_DIR}/node-${KUBELET_HOST}"
    export NODE_CONFIG_DIR

    mkdir -p "${SERVER_CONFIG_DIR}" "${MASTER_CONFIG_DIR}" "${NODE_CONFIG_DIR}"
}
readonly -f os::util::environment::setup_server_vars

# os::util::environment::setup_images_vars sets up environment variables necessary for interacting with release images
#
# Globals:
#  - OS_ROOT
#  - USE_IMAGES
# Arguments:
#  None
# Returns:
#  - export USE_IMAGES
#  - export TAG
#  - export MAX_IMAGES_BULK_IMPORTED_PER_REPOSITORY
function os::util::environment::setup_images_vars() {
    # Use the images from the latest release build if one exists; otherwise, fall back to the 'latest' tag.
    IMAGE_PREFIX="${OS_IMAGE_PREFIX:-"openshift/origin"}"
    if [[ -z "${USE_IMAGES-}" ]]; then
        TAG='latest'
        export TAG
        USE_IMAGES="${IMAGE_PREFIX}-\${component}:latest"
        export USE_IMAGES

        if [[ -e "${OS_ROOT}/_output/local/releases/.commit" ]]; then
            TAG="$(cat "${OS_ROOT}/_output/local/releases/.commit")"
            export TAG
            USE_IMAGES="${IMAGE_PREFIX}-\${component}:${TAG}"
            export USE_IMAGES
        fi
    fi
    export MAX_IMAGES_BULK_IMPORTED_PER_REPOSITORY="${MAX_IMAGES_BULK_IMPORTED_PER_REPOSITORY:-3}"
}
readonly -f os::util::environment::setup_images_vars

73
hack/lib/util/find.sh
Normal file
@@ -0,0 +1,73 @@
#!/bin/bash

# This script contains helper functions for finding components
# in the Origin repository or on the host machine running scripts.

# os::util::find::system_binary determines the absolute path to a
# system binary, if it exists.
#
# Globals:
#  None
# Arguments:
#  - 1: binary name
# Returns:
#  - location of the binary
function os::util::find::system_binary() {
    local binary_name="$1"

    command -v "${binary_name}"
}
readonly -f os::util::find::system_binary

# os::util::find::built_binary determines the absolute path to a
# built binary for the current platform, if it exists.
#
# Globals:
#  - OS_OUTPUT_BINPATH
# Arguments:
#  - 1: binary name
# Returns:
#  - location of the binary
function os::util::find::built_binary() {
    local binary_name="$1"

    local binary_path; binary_path="${OS_OUTPUT_BINPATH}/$( os::build::host_platform )/${binary_name}"
    # we need to check that the path leads to a file
    # as directories also have the executable bit set
    if [[ -f "${binary_path}" && -x "${binary_path}" ]]; then
        echo "${binary_path}"
        return 0
    else
        return 1
    fi
}
readonly -f os::util::find::built_binary

# os::util::find::gopath_binary determines the absolute path to a
# binary installed through the go toolchain, if it exists.
#
# Globals:
#  - GOPATH
# Arguments:
#  - 1: binary name
# Returns:
#  - location of the binary
function os::util::find::gopath_binary() {
    local binary_name="$1"

    local old_ifs="${IFS}"
    IFS=":"
    for part in ${GOPATH}; do
        local binary_path="${part}/bin/${binary_name}"
        # we need to check that the path leads to a file
        # as directories also have the executable bit set
        if [[ -f "${binary_path}" && -x "${binary_path}" ]]; then
            echo "${binary_path}"
            IFS="${old_ifs}"
            return 0
        fi
    done
    IFS="${old_ifs}"
    return 1
}
readonly -f os::util::find::gopath_binary
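
# Example (illustrative): the three finders share a calling convention (print
# the path and return 0 on success), so callers can probe locations in order
# of preference:
#
#   if path="$( os::util::find::built_binary 'junitreport' )"; then
#       "${path}" --help
#   elif path="$( os::util::find::system_binary 'junitreport' )"; then
#       "${path}" --help
#   fi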

22
hack/lib/util/golang.sh
Normal file
@@ -0,0 +1,22 @@
#!/bin/bash
#
# This library holds golang related utility functions.

# os::golang::verify_go_version ensures the go tool exists and is a viable version.
function os::golang::verify_go_version() {
    os::util::ensure::system_binary_exists 'go'

    local go_version
    go_version=($(go version))
    if [[ "${go_version[2]}" != go1.8* ]]; then
        os::log::info "Detected go version: ${go_version[*]}."
        if [[ -z "${PERMISSIVE_GO:-}" ]]; then
            os::log::fatal "Please install Go version ${OS_REQUIRED_GO_VERSION} or use PERMISSIVE_GO=y to bypass this check."
        else
            os::log::warning "Detected golang version doesn't match required Go version."
            os::log::warning "This version mismatch could lead to differences in execution between this run and the CI systems."
            return 0
        fi
    fi
}
readonly -f os::golang::verify_go_version
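
# Example (illustrative): 'go version' prints e.g. "go version go1.8.3 linux/amd64",
# so go_version[2] is "go1.8.3" and the glob accepts any 1.8.x release; the
# check can be relaxed explicitly:
#
#   PERMISSIVE_GO=y os::golang::verify_go_version   # warn instead of exiting on mismatch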

147
hack/lib/util/misc.sh
Normal file
@@ -0,0 +1,147 @@
#!/bin/bash
#
# This library holds miscellaneous utility functions. If there begin to be groups of functions in this
# file that share intent or are thematically similar, they should be split into their own files.

# os::util::describe_return_code describes an exit code
#
# Globals:
#  - OS_SCRIPT_START_TIME
# Arguments:
#  - 1: exit code to describe
# Returns:
#  None
function os::util::describe_return_code() {
    local return_code=$1
    local message="$( os::util::repository_relative_path "$0" ) exited with code ${return_code} "

    if [[ -n "${OS_SCRIPT_START_TIME:-}" ]]; then
        local end_time
        end_time="$(date +%s)"
        local elapsed_time
        elapsed_time="$(( end_time - OS_SCRIPT_START_TIME ))"
        local formatted_time
        formatted_time="$( os::util::format_seconds "${elapsed_time}" )"
        message+="after ${formatted_time}"
    fi

    if [[ "${return_code}" = "0" ]]; then
        os::log::info "${message}"
    else
        os::log::error "${message}"
    fi
}
readonly -f os::util::describe_return_code

# os::util::install_describe_return_code installs the return code describer for the EXIT trap
# If the EXIT trap is not initialized, installing this plugin will initialize it.
#
# Globals:
#  None
# Arguments:
#  None
# Returns:
#  - export OS_DESCRIBE_RETURN_CODE
#  - export OS_SCRIPT_START_TIME
function os::util::install_describe_return_code() {
    export OS_DESCRIBE_RETURN_CODE="true"
    OS_SCRIPT_START_TIME="$( date +%s )"; export OS_SCRIPT_START_TIME
    os::util::trap::init_exit
}
readonly -f os::util::install_describe_return_code

# OS_ORIGINAL_WD is the original working directory the script sourcing this utility file was called
# from. This is an important directory, as if $0 is a relative path, we cannot use the following path
# utility without knowing from where $0 is relative.
if [[ -z "${OS_ORIGINAL_WD:-}" ]]; then
    # since this could be sourced in a context where the utilities are already loaded,
    # we want to ensure that this is re-entrant, so we only set $OS_ORIGINAL_WD if it
    # is not set already
    OS_ORIGINAL_WD="$( pwd )"
    readonly OS_ORIGINAL_WD
    export OS_ORIGINAL_WD
fi

# os::util::repository_relative_path returns the relative path from the $OS_ROOT directory to the
# given file, if the file is inside of the $OS_ROOT directory. If the file is outside of $OS_ROOT,
# this function will return the absolute path to the file.
#
# Globals:
#  - OS_ROOT
# Arguments:
#  - 1: the path to relativize
# Returns:
#  None
function os::util::repository_relative_path() {
    local filename=$1
    local directory; directory="$( dirname "${filename}" )"
    filename="$( basename "${filename}" )"

    if [[ "${directory}" != "${OS_ROOT}"* ]]; then
        pushd "${OS_ORIGINAL_WD}" >/dev/null 2>&1
        directory="$( os::util::absolute_path "${directory}" )"
        popd >/dev/null 2>&1
    fi

    directory="${directory##*${OS_ROOT}/}"

    echo "${directory}/${filename}"
}
readonly -f os::util::repository_relative_path

# os::util::format_seconds formats a duration of time in seconds to print in HHh MMm SSs
#
# Globals:
#  None
# Arguments:
#  - 1: time in seconds to format
# Return:
#  None
function os::util::format_seconds() {
    local raw_seconds=$1

    local hours minutes seconds
    (( hours=raw_seconds/3600 ))
    (( minutes=(raw_seconds%3600)/60 ))
    (( seconds=raw_seconds%60 ))

    printf '%02dh %02dm %02ds' "${hours}" "${minutes}" "${seconds}"
}
readonly -f os::util::format_seconds
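
# Example (illustrative): the arithmetic is integer division and modulus, so
# 3725 seconds breaks down as 3725/3600 = 1h, (3725%3600)/60 = 2m, 3725%60 = 5s:
#
#   os::util::format_seconds 3725   # prints: 01h 02m 05s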

# os::util::sed attempts to make our Bash scripts agnostic to the platform
# on which they run `sed` by glossing over the discrepancy between the GNU
# and BSD `sed -i` in-place editing flags.
#
# Globals:
#  None
# Arguments:
#  - all: arguments to pass to `sed -i`
# Return:
#  None
function os::util::sed() {
    local sudo="${USE_SUDO:+sudo}"
    if LANG=C sed --help 2>&1 | grep -q "GNU sed"; then
        ${sudo} sed -i'' "$@"
    else
        ${sudo} sed -i '' "$@"
    fi
}
readonly -f os::util::sed
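
# Example (illustrative): callers write one invocation and get working in-place
# edits on both Linux (GNU sed) and macOS (BSD sed):
#
#   os::util::sed 's/foo/bar/g' "${OS_ROOT}/README.md"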

# os::util::base64decode attempts to make our Bash scripts agnostic to the platform
# on which they run `base64` by glossing over the discrepancy between the GNU
# and BSD flags used for decoding.
#
# Globals:
#  None
# Arguments:
#  - all: arguments to pass to `base64` for decoding
# Return:
#  None
function os::util::base64decode() {
    if [[ "$(go env GOHOSTOS)" == "darwin" ]]; then
        base64 -D "$@"
    else
        base64 -d "$@"
    fi
}
readonly -f os::util::base64decode

164
hack/lib/util/text.sh
Normal file
@@ -0,0 +1,164 @@
#!/bin/bash

# This file contains helpful aliases for manipulating the output text to the terminal as
# well as functions for one-command augmented printing.

# os::text::reset resets the terminal output to default if it is called in a TTY
function os::text::reset() {
    if os::text::internal::is_tty; then
        tput sgr0
    fi
}
readonly -f os::text::reset

# os::text::bold sets the terminal output to bold text if it is called in a TTY
function os::text::bold() {
    if os::text::internal::is_tty; then
        tput bold
    fi
}
readonly -f os::text::bold

# os::text::red sets the terminal output to red text if it is called in a TTY
function os::text::red() {
    if os::text::internal::is_tty; then
        tput setaf 1
    fi
}
readonly -f os::text::red

# os::text::green sets the terminal output to green text if it is called in a TTY
function os::text::green() {
    if os::text::internal::is_tty; then
        tput setaf 2
    fi
}
readonly -f os::text::green

# os::text::blue sets the terminal output to blue text if it is called in a TTY
function os::text::blue() {
    if os::text::internal::is_tty; then
        tput setaf 4
    fi
}
readonly -f os::text::blue

# os::text::yellow sets the terminal output to yellow text if it is called in a TTY
function os::text::yellow() {
    if os::text::internal::is_tty; then
        tput setaf 11
    fi
}
readonly -f os::text::yellow

# os::text::clear_last_line clears the text from the last line of output to the
# terminal and leaves the cursor on that line to allow for overwriting that text
# if it is called in a TTY
function os::text::clear_last_line() {
    if os::text::internal::is_tty; then
        tput cuu 1
        tput el
    fi
}
readonly -f os::text::clear_last_line

# os::text::clear_string attempts to clear the entirety of a string from the terminal.
# If the string contains literal tabs or other characters that take up more than one
# character space in output, or if the window size is changed before this function
# is called, it will not function correctly.
# No action is taken if this is called outside of a TTY
function os::text::clear_string() {
    local -r string="$1"
    if os::text::internal::is_tty; then
        echo "${string}" | while read line; do
            # num_lines is the number of terminal lines this one line of output
            # would have taken up with the current terminal width in columns
            local num_lines=$(( ${#line} / $( tput cols ) ))
            for (( i = 0; i <= num_lines; i++ )); do
                os::text::clear_last_line
            done
        done
    fi
}
readonly -f os::text::clear_string

# os::text::internal::is_tty determines if we are outputting to a TTY
function os::text::internal::is_tty() {
    [[ -t 1 && -n "${TERM:-}" ]]
}
readonly -f os::text::internal::is_tty

# os::text::print_bold prints all input in bold text
function os::text::print_bold() {
    os::text::bold
    echo "${*}"
    os::text::reset
}
readonly -f os::text::print_bold

# os::text::print_red prints all input in red text
function os::text::print_red() {
    os::text::red
    echo "${*}"
    os::text::reset
}
readonly -f os::text::print_red

# os::text::print_red_bold prints all input in bold red text
function os::text::print_red_bold() {
    os::text::red
    os::text::bold
    echo "${*}"
    os::text::reset
}
readonly -f os::text::print_red_bold

# os::text::print_green prints all input in green text
function os::text::print_green() {
    os::text::green
    echo "${*}"
    os::text::reset
}
readonly -f os::text::print_green

# os::text::print_green_bold prints all input in bold green text
function os::text::print_green_bold() {
    os::text::green
    os::text::bold
    echo "${*}"
    os::text::reset
}
readonly -f os::text::print_green_bold

# os::text::print_blue prints all input in blue text
function os::text::print_blue() {
    os::text::blue
    echo "${*}"
    os::text::reset
}
readonly -f os::text::print_blue

# os::text::print_blue_bold prints all input in bold blue text
function os::text::print_blue_bold() {
    os::text::blue
    os::text::bold
    echo "${*}"
    os::text::reset
}
readonly -f os::text::print_blue_bold

# os::text::print_yellow prints all input in yellow text
function os::text::print_yellow() {
    os::text::yellow
    echo "${*}"
    os::text::reset
}
readonly -f os::text::print_yellow

# os::text::print_yellow_bold prints all input in bold yellow text
function os::text::print_yellow_bold() {
    os::text::yellow
    os::text::bold
    echo "${*}"
    os::text::reset
}
readonly -f os::text::print_yellow_bold
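
# Example (illustrative): the print_* helpers degrade gracefully, emitting
# color escapes only on a TTY, so the same call is safe in CI log capture:
#
#   os::text::print_green_bold "PASS: all suites"
#   os::text::print_red "FAIL: see ${LOG_DIR}/scripts.log"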

99
hack/lib/util/trap.sh
Normal file
@@ -0,0 +1,99 @@
#!/bin/bash
#
# This library defines the trap handlers for the ERR and EXIT signals. Any new handler for these signals
# must be added to these handlers and activated by the environment variable mechanism that the rest use.
# These functions ensure that no handler can ever alter the exit code that was emitted by a command
# in a test script.

# os::util::trap::init_err initializes the privileged handler for the ERR signal if it hasn't
# been registered already. This will overwrite any other handlers registered on the signal.
#
# Globals:
#  None
# Arguments:
#  None
# Returns:
#  None
function os::util::trap::init_err() {
    if ! trap -p ERR | grep -q 'os::util::trap::err_handler'; then
        trap 'os::util::trap::err_handler;' ERR
    fi
}
readonly -f os::util::trap::init_err

# os::util::trap::init_exit initializes the privileged handler for the EXIT signal if it hasn't
# been registered already. This will overwrite any other handlers registered on the signal.
#
# Globals:
#  None
# Arguments:
#  None
# Returns:
#  None
function os::util::trap::init_exit() {
    if ! trap -p EXIT | grep -q 'os::util::trap::exit_handler'; then
        trap 'os::util::trap::exit_handler;' EXIT
    fi
}
readonly -f os::util::trap::init_exit

# os::util::trap::err_handler is the handler for the ERR signal.
#
# Globals:
#  - OS_TRAP_DEBUG
#  - OS_USE_STACKTRACE
# Arguments:
#  None
# Returns:
#  - returns original return code, allows privileged handler to exit if necessary
function os::util::trap::err_handler() {
    local -r return_code=$?
    local -r last_command="${BASH_COMMAND}"

    if set +o | grep -q '\-o errexit'; then
        local -r errexit_set=true
    fi

    if [[ "${OS_TRAP_DEBUG:-}" = "true" ]]; then
        echo "[DEBUG] Error handler executing with return code \`${return_code}\`, last command \`${last_command}\`, and errexit set \`${errexit_set:-}\`"
    fi

    if [[ "${OS_USE_STACKTRACE:-}" = "true" ]]; then
        # the OpenShift stacktrace function is treated as a privileged handler for this signal
        # and is therefore allowed to run outside of a subshell in order to allow it to `exit`
        # if necessary
        os::log::stacktrace::print "${return_code}" "${last_command}" "${errexit_set:-}"
    fi

    return "${return_code}"
}
readonly -f os::util::trap::err_handler

# os::util::trap::exit_handler is the handler for the EXIT signal.
#
# Globals:
#  - OS_TRAP_DEBUG
#  - OS_DESCRIBE_RETURN_CODE
# Arguments:
#  None
# Returns:
#  - original exit code of the script that exited
function os::util::trap::exit_handler() {
    local -r return_code=$?

    # we do not want these traps to be able to trigger more errors, we can let them fail silently
    set +o errexit

    if [[ "${OS_TRAP_DEBUG:-}" = "true" ]]; then
        echo "[DEBUG] Exit handler executing with return code \`${return_code}\`"
    fi

    # the following envars selectively enable optional exit traps, all of which are run inside of
    # a subshell in order to sandbox them and not allow them to influence how this script will exit
    if [[ "${OS_DESCRIBE_RETURN_CODE:-}" = "true" ]]; then
        ( os::util::describe_return_code "${return_code}" )
    fi

    exit "${return_code}"
}
readonly -f os::util::trap::exit_handler
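
# Example (illustrative): handlers are activated indirectly; library installers
# set the opt-in variable and register the privileged trap, e.g.:
#
#   os::util::install_describe_return_code   # sets OS_DESCRIBE_RETURN_CODE, calls init_exit
#   os::log::stacktrace::install             # sets OS_USE_STACKTRACE, calls init_err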

62
hack/move-upstream.sh
Executable file
@@ -0,0 +1,62 @@
#!/bin/bash

# See HACKING.md for usage
# To apply all the kube UPSTREAM patches to a kubernetes git directory, you can
# 1. Set UPSTREAM_DIR for your kube working directory
# 2. Set TARGET_BRANCH for the new branch to work in
# 3. In your kube git directory, set the current branch to the level you want to apply patches to
# 4. Run `hack/move-upstream.sh master...<commit hash you want to start pulling patches from>`
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"

repo="${UPSTREAM_REPO:-k8s.io/kubernetes}"
package="${UPSTREAM_PACKAGE:-pkg/api}"

patch="${TMPDIR:-/tmp}/patch"
rm -rf "${patch}"
mkdir -p "${patch}"
relativedir="${UPSTREAM_REPO_LOCATION:-../../../${repo}}"
if [[ ! -d "${relativedir}" ]]; then
    echo "Expected ${relativedir} to exist" 1>&2
    exit 1
fi

if [[ -z "${NO_REBASE-}" ]]; then
    if [[ "${package}" != "." ]]; then
        out="${repo}/${package}"
    else
        out="${repo}"
    fi
    lastrev="$(go run "${OS_ROOT}/tools/godepversion/godepversion.go" "${OS_ROOT}/Godeps/Godeps.json" "${out}")"
fi

branch="${TARGET_BRANCH:-$(git rev-parse --abbrev-ref HEAD)}"
selector="origin/master...${branch}"
if [[ -n "${1-}" ]]; then
    selector="$1"
fi

echo "++ Generating patch for ${selector} onto ${lastrev} ..." 1>&2
index=0
for commit in $(git log --no-merges --format="%H" --reverse "${selector}" -- "vendor/${repo}/"); do
    git format-patch --raw --start-number=${index} --relative="vendor/${repo}/" "${commit}^..${commit}" -o "${patch}"
    let index+=10
done

# remove all commits that had no entries
find "${patch}" -type f -size 0 -exec rm {} \;

pushd "${relativedir}" > /dev/null
os::build::require_clean_tree

# create a new branch
git checkout -b "${branch}" "${lastrev}"

# apply the changes
if ! git am -3 --ignore-whitespace ${patch}/*.patch; then
    echo 1>&2
    echo "++ Patches do not apply cleanly, continue with 'git am' in ${relativedir}" 1>&2
    exit 1
fi

echo 1>&2
echo "++ All patches applied cleanly upstream" 1>&2

71
hack/push-release.sh
Executable file
@@ -0,0 +1,71 @@
#!/bin/bash

# This script pushes all of the built images to a registry.
#
# Set OS_PUSH_BASE_IMAGES=true to push base images
# Set OS_PUSH_BASE_REGISTRY to prefix the destination images
#
STARTTIME=$(date +%s)
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"

# Allow a release to be repushed with a tag
tag="${OS_PUSH_TAG:-}"
if [[ -n "${tag}" ]]; then
    if [[ "${tag}" == "HEAD" ]]; then
        if [[ "$( git tag --points-at HEAD | wc -l )" -ne 1 ]]; then
            echo "error: There must be exactly one tag pointing to HEAD to use OS_PUSH_TAG=HEAD"
            exit 1
        fi
        tag=":$( git tag --points-at HEAD )"
    else
        tag=":${tag}"
    fi
else
    tag=":latest"
fi

# Source tag
source_tag="${OS_TAG:-}"
if [[ -z "${source_tag}" ]]; then
    source_tag="latest"
    file="${OS_ROOT}/_output/local/releases/.commit"
    if [[ -e "${file}" ]]; then
        source_tag="$(cat "${file}")"
    fi
fi

# split the space-delimited image list into an array so the loops below
# iterate over each image rather than over one long string
images=( ${OS_ALL_IMAGES} )

PUSH_OPTS=""
if docker push --help | grep -q force; then
    PUSH_OPTS="--force"
fi

# Pull latest in preparation for tagging
if [[ "${tag}" != ":latest" ]]; then
    if [[ -z "${OS_PUSH_LOCAL-}" ]]; then
        for image in "${images[@]}"; do
            docker pull "${OS_PUSH_BASE_REGISTRY-}${image}:${source_tag}"
        done
    else
        os::log::warning "Pushing local :${source_tag} images to ${OS_PUSH_BASE_REGISTRY-}*${tag}"
        if [[ -z "${OS_PUSH_ALWAYS:-}" ]]; then
            echo " CTRL+C to cancel, or any other key to continue"
            read
        fi
    fi
fi

if [[ "${OS_PUSH_BASE_REGISTRY-}" != "" || "${tag}" != "" ]]; then
    for image in "${images[@]}"; do
        os::log::info "Tagging ${image}:${source_tag} as ${OS_PUSH_BASE_REGISTRY-}${image}${tag}..."
        docker tag "${image}:${source_tag}" "${OS_PUSH_BASE_REGISTRY-}${image}${tag}"
    done
fi

for image in "${images[@]}"; do
    os::log::info "Pushing ${OS_PUSH_BASE_REGISTRY-}${image}${tag}..."
    docker push ${PUSH_OPTS} "${OS_PUSH_BASE_REGISTRY-}${image}${tag}"
done

ret=$?; ENDTIME=$(date +%s); echo "$0 took $(($ENDTIME - $STARTTIME)) seconds"; exit "$ret"
|
||||
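As a sketch of how the tagging and pushing logic above combines, a hypothetical release push (the registry host is a placeholder) might be:

# retag the single release tag pointing at HEAD and push it to a staging registry
OS_PUSH_BASE_REGISTRY=registry.example.com/ OS_PUSH_TAG=HEAD hack/push-release.sh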
192
hack/test-go.sh
Executable file
@@ -0,0 +1,192 @@
#!/bin/bash
#
# This script runs Go language unit tests for the repository. Arguments to this script
# are parsed as a list of packages to test until the first argument starting with '-' or '--' is
# found. That argument and all following arguments are interpreted as flags to be passed directly
# to `go test`. If no arguments are given, then "all" packages are tested.
#
# Coverage reports and jUnit XML reports can be generated by this script as well, but both cannot
# be generated at once.
#
# This script consumes the following parameters as environment variables:
#  - DRY_RUN: prints all packages that would be tested with the args that would be used and exits
#  - TIMEOUT: the timeout for any one unit test (default '120s')
#  - DETECT_RACES: toggles the 'go test' race detector (default 'true')
#  - COVERAGE_OUTPUT_DIR: locates the directory in which coverage output files will be placed
#  - COVERAGE_SPEC: a set of flags for 'go test' that specify the coverage behavior (default '-cover -covermode atomic')
#  - GOTEST_FLAGS: any other flags to be sent to 'go test'
#  - JUNIT_REPORT: toggles the creation of jUnit XML from the test output and changes this script's output behavior
#    to use the 'junitreport' tool for summarizing the tests.
#  - DLV_DEBUG: toggles running tests using the delve debugger
function cleanup() {
    return_code=$?

    os::test::junit::generate_report
    if [[ "${JUNIT_REPORT_NUM_FAILED:-}" == "0 failed" ]]; then
        if [[ "${return_code}" -ne "0" ]]; then
            os::log::warning "While the jUnit report found no failed tests, the \`go test\` process failed."
            os::log::warning "This usually means that the unit test suite failed to compile."
        fi
    fi

    os::util::describe_return_code "${return_code}"
    exit "${return_code}"
}
trap "cleanup" EXIT

source "$(dirname "${BASH_SOURCE}")/lib/init.sh"
os::build::setup_env
os::cleanup::tmpdir

# Internalize environment variables we consume and default them if they're not set
dry_run="${DRY_RUN:-}"
test_timeout="${TIMEOUT:-120s}"
detect_races="${DETECT_RACES:-true}"
coverage_output_dir="${COVERAGE_OUTPUT_DIR:-}"
coverage_spec="${COVERAGE_SPEC:--cover -covermode atomic}"
gotest_flags="${GOTEST_FLAGS:-}"
junit_report="${JUNIT_REPORT:-}"
dlv_debug="${DLV_DEBUG:-}"

if [[ -n "${junit_report}" && -n "${coverage_output_dir}" ]]; then
    echo "$0 cannot create jUnit XML reports and coverage reports at the same time."
    exit 1
fi

# determine if the user wanted verbosity
verbose=
if [[ "${gotest_flags}" =~ -v( |$) ]]; then
    verbose=true
fi

# Build arguments for 'go test'
if [[ -z "${verbose}" && -n "${junit_report}" ]]; then
    # verbosity can be set explicitly by the user or set implicitly by asking for the jUnit
    # XML report, so we only want to add the flag if it hasn't been added by a user already
    # and is being implicitly set by jUnit report generation
    gotest_flags+=" -v"
fi

if [[ "${detect_races}" == "true" ]]; then
    gotest_flags+=" -race"
fi

# check to see if the user has not disabled coverage mode
if [[ -n "${coverage_spec}" ]]; then
    # if we have a coverage spec set, we add it. '-race' implies '-cover -covermode atomic'
    # but specifying both at the same time does not lead to an error, so we can add both specs
    gotest_flags+=" ${coverage_spec}"
fi

# check to see if the user has not disabled test timeouts
if [[ -n "${test_timeout}" ]]; then
    gotest_flags+=" -timeout ${test_timeout}"
fi

# Break up the positional arguments into packages that need to be tested and arguments that need to be passed to `go test`
package_args=
for arg in "$@"; do
    if [[ "${arg}" =~ ^-.* ]]; then
        # we found an arg that begins with a dash, so we stop interpreting arguments
        # henceforth as packages and instead interpret them as flags to give to `go test`
        break
    fi
    # an arg found before the first flag is a package
    package_args+=" ${arg}"
    shift
done
gotest_flags+=" $*"

# Determine packages to test
godeps_package_prefix="vendor/"
test_packages=
if [[ -n "${package_args}" ]]; then
    for package in ${package_args}; do
        # If we're trying to recursively test a package under the vendor tree, strip the prefix so go test can find the packages correctly
        if [[ "${package}" == "${godeps_package_prefix}"*"/..." ]]; then
            test_packages="${test_packages} ${package:${#godeps_package_prefix}}"
        else
            test_packages="${test_packages} ${OS_GO_PACKAGE}/${package}"
        fi
    done
else
    # If no packages are given to test, we need to generate a list of all packages with unit tests
    test_packages="$(os::util::list_test_packages_under '*')"
fi

if [[ -n "${dry_run}" ]]; then
    echo "The following base flags for \`go test\` will be used by $0:"
    echo "go test ${gotest_flags}"
    echo "The following packages will be tested by $0:"
    for package in ${test_packages}; do
        echo "${package}"
    done
    exit 0
fi

# Run 'go test' with the accumulated arguments and packages:
if [[ -n "${junit_report}" ]]; then
    # we need to generate jUnit xml

    test_error_file="${LOG_DIR}/test-go-err.log"

    os::log::info "Running \`go test\`..."
    # we don't care if the `go test` fails in this pipe, as we want to generate the report and summarize the output anyway
    set +o pipefail

    go test -i ${gotest_flags} ${test_packages}
    go test ${gotest_flags} ${test_packages} 2>"${test_error_file}" | tee "${JUNIT_REPORT_OUTPUT}"

    test_return_code="${PIPESTATUS[0]}"

    set -o pipefail

    if [[ -s "${test_error_file}" ]]; then
        os::log::warning "\`go test\` had the following output to stderr:
$( cat "${test_error_file}") "
    fi

    if grep -q 'WARNING: DATA RACE' "${JUNIT_REPORT_OUTPUT}"; then
        locations=( $( sed -n '/WARNING: DATA RACE/=' "${JUNIT_REPORT_OUTPUT}") )
        if [[ "${#locations[@]}" -gt 1 ]]; then
            os::log::warning "\`go test\` detected data races."
            os::log::warning "Details can be found in the full output file at lines ${locations[*]}."
        else
            os::log::warning "\`go test\` detected a data race."
            os::log::warning "Details can be found in the full output file at line ${locations[*]}."
        fi
    fi

    exit "${test_return_code}"

elif [[ -n "${coverage_output_dir}" ]]; then
    # we need to generate coverage reports
    go test -i ${gotest_flags} ${test_packages}
    for test_package in ${test_packages}; do
        mkdir -p "${coverage_output_dir}/${test_package}"
        local_gotest_flags="${gotest_flags} -coverprofile=${coverage_output_dir}/${test_package}/profile.out"

        go test ${local_gotest_flags} ${test_package}
    done

    # assemble all profiles and generate a coverage report
    echo 'mode: atomic' > "${coverage_output_dir}/profiles.out"
    find "${coverage_output_dir}" -name profile.out | xargs sed '/^mode: atomic$/d' >> "${coverage_output_dir}/profiles.out"

    go tool cover "-html=${coverage_output_dir}/profiles.out" -o "${coverage_output_dir}/coverage.html"
    os::log::info "Coverage profile written to ${coverage_output_dir}/coverage.html"

    # clean up all of the individual coverage reports as they have been subsumed into the report at ${coverage_output_dir}/coverage.html
    # we can clean up all of the coverage reports at once as they all exist in subdirectories of ${coverage_output_dir}/${OS_GO_PACKAGE}
    # and they are the only files found in those subdirectories
    rm -rf "${coverage_output_dir:?}/${OS_GO_PACKAGE}"

elif [[ -n "${dlv_debug}" ]]; then
    # run tests using the delve debugger
    dlv test ${test_packages}
else
    # we need to generate neither jUnit XML nor coverage reports
    go test -i ${gotest_flags} ${test_packages}
    go test ${gotest_flags} ${test_packages}
fi
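To illustrate the package/flag split and the DRY_RUN switch described in the header, two hypothetical invocations (the package path is a placeholder):

# everything before the first dash-prefixed argument is a package; the rest goes to `go test`
hack/test-go.sh pkg/dockerregistry -run TestRepository -count=1
# print the computed flags and package list without running anything
DRY_RUN=true hack/test-go.sh pkg/dockerregistry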
19
hack/verify-gofmt.sh
Executable file
@@ -0,0 +1,19 @@
#!/bin/bash
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"

function cleanup() {
    return_code=$?
    os::util::describe_return_code "${return_code}"
    exit "${return_code}"
}
trap "cleanup" EXIT

os::golang::verify_go_version

bad_files=$(os::util::list_go_src_files | xargs gofmt -s -l)
if [[ -n "${bad_files}" ]]; then
    os::log::warning "!!! gofmt needs to be run on the listed files"
    echo "${bad_files}"
    os::log::fatal "Try running 'gofmt -s -d [path]'
Or autocorrect with 'hack/verify-gofmt.sh | xargs -n 1 gofmt -s -w'"
fi
34
hack/verify-golint.sh
Executable file
@@ -0,0 +1,34 @@
#!/bin/bash
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"

os::golang::verify_go_version
os::util::ensure::system_binary_exists 'golint'

arg="${1:-""}"
bad_files=""

if [ "$arg" == "-m" ]; then
    head=$(git rev-parse --short HEAD | xargs echo -n)
    set +e
    modified_files=$(git diff-tree --no-commit-id --name-only -r master..$head | \
        grep "^pkg" | grep ".go$" | grep -v "bindata.go$" | grep -v "Godeps" | \
        grep -v "third_party")
    if [ -n "${modified_files}" ]; then
        echo -e "Checking modified files: ${modified_files}\n"
        for f in $modified_files; do golint $f; done
        echo
    fi
    set -e
else
    bad_files=$(os::util::list_go_src_files | \
        sort -u | \
        sed 's/^.\{2\}//' | \
        xargs -n1 printf "${GOPATH}/src/${OS_GO_PACKAGE}/%s\n" | \
        xargs -n1 golint)
fi

if [[ -n "${bad_files}" ]]; then
    echo "golint detected the following problems:"
    echo "${bad_files}"
    exit 1
fi
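The '-m' mode above lints only files changed relative to master, which keeps pre-push checks fast:

# lint the whole source tree
hack/verify-golint.sh
# lint only .go files under pkg/ modified since master
hack/verify-golint.sh -m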
41
hack/verify-govet.sh
Executable file
@@ -0,0 +1,41 @@
#!/bin/bash
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"

function cleanup() {
    return_code=$?
    os::util::describe_return_code "${return_code}"
    exit "${return_code}"
}
trap "cleanup" EXIT

os::golang::verify_go_version

govet_blacklist=( "${OS_GOVET_BLACKLIST[@]-}" )

function govet_blacklist_contains() {
    local text=$1
    for blacklist_entry in "${govet_blacklist[@]-}"; do
        if grep -Eqx "${blacklist_entry}" <<<"${text}"; then
            # the text we got matches this blacklist entry
            return 0
        fi
    done
    return 1
}

for test_dir in $(os::util::list_go_src_dirs); do
    if ! result="$(go tool vet -shadow=false -printfuncs=Info,Infof,Warning,Warningf "${test_dir}" 2>&1)"; then
        while read -r line; do
            if ! govet_blacklist_contains "${line}"; then
                echo "${line}"
                FAILURE=true
            fi
        done <<<"${result}"
    fi
done

# We don't want to exit on the first failure of go vet, so just keep track of
# whether a failure occurred or not.
if [[ -n "${FAILURE:-}" ]]; then
    os::log::fatal "FAILURE: go vet failed!"
fi
19
hack/verify-imports.sh
Executable file
@@ -0,0 +1,19 @@
#!/bin/bash

# This script verifies that package trees
# conform to our import restrictions
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"

function cleanup() {
    return_code=$?
    os::test::junit::generate_report
    os::util::describe_return_code "${return_code}"
    exit "${return_code}"
}
trap "cleanup" EXIT

os::util::ensure::built_binary_exists 'import-verifier'

os::test::junit::declare_suite_start "verify/imports"
os::cmd::expect_success "import-verifier ${OS_ROOT}/hack/import-restrictions.json"
os::test::junit::declare_suite_end
20
hack/verify-upstream-commits.sh
Executable file
@@ -0,0 +1,20 @@
#!/bin/bash
source "$(dirname "${BASH_SOURCE}")/lib/init.sh"

function cleanup() {
    return_code=$?
    os::test::junit::generate_report
    os::util::describe_return_code "${return_code}"
    exit "${return_code}"
}
trap "cleanup" EXIT

if ! git status &> /dev/null; then
    os::log::fatal "Not a Git repository"
fi

os::util::ensure::built_binary_exists 'commitchecker'

os::test::junit::declare_suite_start "verify/upstream-commits"
os::cmd::expect_success "commitchecker"
os::test::junit::declare_suite_end
130
image-registry.spec
Normal file
@@ -0,0 +1,130 @@
#
# This is a template package spec that will support Go builds following the OpenShift conventions.
# It expects a set of standard env vars that define the Git version being built and can also handle
# multi-architecture Linux builds. It has stubs for cross building.
#
# Search for TODO and fill those sections out as appropriate.
#

# debuginfo is not supported with Go
%global debug_package %{nil}

# modifying the Go binaries breaks the DWARF debugging
%global __os_install_post %{_rpmconfigdir}/brp-compress

# %commit and %os_git_vars are intended to be set by tito custom builders provided
# in the .tito/lib directory. The values in this spec file will not be kept up to date.
%{!?commit: %global commit HEAD }
%global shortcommit %(c=%{commit}; echo ${c:0:7})
# os_git_vars needed to run hack scripts during rpm builds
%{!?os_git_vars: %global os_git_vars OS_GIT_VERSION='' OS_GIT_COMMIT='' OS_GIT_MAJOR='' OS_GIT_MINOR='' OS_GIT_TREE_STATE='' }

%if 0%{?skip_build}
%global do_build 0
%else
%global do_build 1
%endif
%if 0%{?skip_prep}
%global do_prep 0
%else
%global do_prep 1
%endif

%if 0%{?fedora} || 0%{?epel}
%global need_redistributable_set 0
%else
# Due to library availability, redistributable builds only work on x86_64
%ifarch x86_64
%global need_redistributable_set 1
%else
%global need_redistributable_set 0
%endif
%endif
%{!?make_redistributable: %global make_redistributable %{need_redistributable_set}}

#
# Customize from here.
#

%global golang_version 1.8.1
%{!?version: %global version 0.0.1}
%{!?release: %global release 1}
%global package_name origin-dockerregistry
%global product_name OpenShift Docker Registry
%global import_path github.com/openshift/image-registry

Name:           %{package_name}
Version:        %{version}
Release:        %{release}%{?dist}
Summary:        TODO
License:        ASL 2.0
URL:            https://%{import_path}

Source0:        https://%{import_path}/archive/%{commit}/%{name}-%{version}.tar.gz
BuildRequires:  golang >= %{golang_version}

# If go_arches is not defined, fall through to the implicit golang arches
%if 0%{?go_arches:1}
ExclusiveArch:  %{go_arches}
%else
ExclusiveArch:  x86_64 aarch64 ppc64le s390x
%endif

### AUTO-BUNDLED-GEN-ENTRY-POINT

%description
TODO

%prep
%if 0%{do_prep}
%setup -q
%endif

%build
%if 0%{do_build}
%if 0%{make_redistributable}
# Create Binaries for all internally defined arches
%{os_git_vars} make build-cross
%else
# Create Binaries only for the building arch
%ifarch x86_64
  BUILD_PLATFORM="linux/amd64"
%endif
%ifarch %{ix86}
  BUILD_PLATFORM="linux/386"
%endif
%ifarch ppc64le
  BUILD_PLATFORM="linux/ppc64le"
%endif
%ifarch %{arm} aarch64
  BUILD_PLATFORM="linux/arm64"
%endif
%ifarch s390x
  BUILD_PLATFORM="linux/s390x"
%endif
OS_ONLY_BUILD_PLATFORMS="${BUILD_PLATFORM}" %{os_git_vars} make build-cross
%endif
%endif

%install

PLATFORM="$(go env GOHOSTOS)/$(go env GOHOSTARCH)"
install -d %{buildroot}%{_bindir}

# Install linux components
for bin in dockerregistry
do
  echo "+++ INSTALLING ${bin}"
  install -p -m 755 _output/local/bin/${PLATFORM}/${bin} %{buildroot}%{_bindir}/${bin}
done

%files
%doc README.md
%license LICENSE
%{_bindir}/dockerregistry

%pre

%changelog
* Mon Nov 06 2017 Anonymous <anon@nowhere.com> 0.0.1
- Initial example of spec.
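The %commit, %version, and %release macros above are normally injected by the tito builders; a minimal hypothetical local build that stubs them on the command line (all values are placeholders) could be:

# build the RPM locally, defining the macros that tito would normally provide
rpmbuild -ba image-registry.spec \
  --define "commit $(git rev-parse HEAD)" \
  --define "version 0.0.1" --define "release 2"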
@@ -6,7 +6,10 @@
#
FROM openshift/origin-base

ADD bin/dockerregistry /usr/bin/dockerregistry
RUN INSTALL_PKGS="origin-dockerregistry" && \
    yum --enablerepo=origin-local-release install -y ${INSTALL_PKGS} && \
    rpm -V ${INSTALL_PKGS} && \
    yum clean all

COPY config.yml ${REGISTRY_CONFIGURATION_PATH}
294
tools/changelog/changelog.go
Normal file
@@ -0,0 +1,294 @@
package main

import (
    "fmt"
    "log"
    "os"
    "os/exec"
    "regexp"
    "sort"
    "strings"
)

var (
    mergeRequest   = regexp.MustCompile(`Merge pull request #([\d]+)`)
    webconsoleBump = regexp.MustCompile(regexp.QuoteMeta("bump(github.com/openshift/origin-web-console): ") + `([\w]+)`)
    upstreamKube   = regexp.MustCompile(`^UPSTREAM: (\d+)+:(.+)`)
    upstreamRepo   = regexp.MustCompile(`^UPSTREAM: ([\w/-]+): (\d+)+:(.+)`)
    prefix         = regexp.MustCompile(`^[\w-]+: `)

    assignments = []prefixAssignment{
        {"cluster up", "cluster"},
        {" pv ", "storage"},
        {"haproxy", "router"},
        {"router", "router"},
        {"route", "route"},
        {"authoriz", "auth"},
        {"rbac", "auth"},
        {"authent", "auth"},
        {"reconcil", "auth"},
        {"auth", "auth"},
        {"role", "auth"},
        {" dc ", "deploy"},
        {"deployment", "deploy"},
        {"rolling", "deploy"},
        {"security context constr", "security"},
        {"scc", "security"},
        {"pipeline", "build"},
        {"build", "build"},
        {"registry", "registry"},
        {"registries", "image"},
        {"image", "image"},
        {" arp ", "network"},
        {" cni ", "network"},
        {"egress", "network"},
        {"network", "network"},
        {"oc ", "cli"},
        {"template", "template"},
        {"etcd", "server"},
        {"pod", "node"},
        {"hack/", "hack"},
        {"e2e", "test"},
        {"integration", "test"},
        {"cluster", "cluster"},
        {"master", "server"},
        {"packages", "hack"},
        {"api", "server"},
    }
)

type prefixAssignment struct {
    term   string
    prefix string
}

type commit struct {
    short   string
    parents []string
    message string
}

func contains(arr []string, value string) bool {
    for _, s := range arr {
        if s == value {
            return true
        }
    }
    return false
}

func main() {
    log.SetFlags(0)
    if len(os.Args) != 3 {
        log.Fatalf("Must specify two arguments, FROM and TO")
    }
    from := os.Args[1]
    to := os.Args[2]

    out, err := exec.Command("git", "log", "--topo-order", "--pretty=tformat:%h %p|%s", "--reverse", fmt.Sprintf("%s..%s", from, to)).CombinedOutput()
    if err != nil {
        log.Fatal(err)
    }

    hide := make(map[string]struct{})
    var apiChanges []string
    var webconsole []string
    var commits []commit
    var upstreams []commit
    var bumps []commit
    for _, line := range strings.Split(string(out), "\n") {
        if len(strings.TrimSpace(line)) == 0 {
            continue
        }
        parts := strings.SplitN(line, "|", 2)
        hashes := strings.Split(parts[0], " ")
        c := commit{short: hashes[0], parents: hashes[1:], message: parts[1]}

        if strings.HasPrefix(c.message, "UPSTREAM: ") {
            hide[c.short] = struct{}{}
            upstreams = append(upstreams, c)
        }
        if strings.HasPrefix(c.message, "bump(") {
            hide[c.short] = struct{}{}
            bumps = append(bumps, c)
        }

        if len(c.parents) == 1 {
            commits = append(commits, c)
            continue
        }

        matches := mergeRequest.FindStringSubmatch(line)
        if len(matches) == 0 {
            // this may have been a human pressing the merge button, we'll just record this as a direct push
            continue
        }

        // split the accumulated commits into any that are force merges (assumed to be the initial set due
        // to --topo-order) from the PR commits as soon as we see any of our merge parents. Then print
        // any of the force merges
        var first int
        for i := range commits {
            first = i
            if contains(c.parents, commits[i].short) {
                first++
                break
            }
        }
        individual := commits[:first]
        merged := commits[first:]
        for _, commit := range individual {
            if len(commit.parents) > 1 {
                continue
            }
            if _, ok := hide[commit.short]; ok {
                continue
            }
            fmt.Printf("force-merge: %s %s\n", commit.message, commit.short)
        }

        // try to find either the PR title or the first commit title from the merge commit
        out, err := exec.Command("git", "show", "--pretty=tformat:%b", c.short).CombinedOutput()
        if err != nil {
            log.Fatal(err)
        }
        var message string
        para := strings.Split(string(out), "\n\n")
        if len(para) > 0 && strings.HasPrefix(para[0], "Automatic merge from submit-queue") {
            para = para[1:]
        }
        // this is no longer necessary with the submit queue in place
        if len(para) > 0 && strings.HasPrefix(para[0], "Merged by ") {
            para = para[1:]
        }
        // post submit-queue, the merge bot will add the PR title, which is usually pretty good
        if len(para) > 0 {
            message = strings.Split(para[0], "\n")[0]
        }
        if len(message) == 0 && len(merged) > 0 {
            message = merged[0].message
        }
        if len(message) > 0 && len(merged) == 1 && message == merged[0].message {
            merged = nil
        }

        // try to calculate a prefix based on the diff
        if len(message) > 0 && !prefix.MatchString(message) {
            prefix, ok := findPrefixFor(message, merged)
            if ok {
                message = prefix + ": " + message
            }
        }

        // github merge

        // has api changes
        display := fmt.Sprintf("%s [\\#%s](https://github.com/openshift/origin/pull/%s)", message, matches[1], matches[1])
        if hasFileChanges(c.short, "api/") {
            apiChanges = append(apiChanges, display)
        }

        var filtered []commit
        for _, commit := range merged {
            if _, ok := hide[commit.short]; ok {
                continue
            }
            filtered = append(filtered, commit)
        }
        if len(filtered) > 0 {
            fmt.Printf("- %s\n", display)
            for _, commit := range filtered {
                fmt.Printf("  - %s (%s)\n", commit.message, commit.short)
            }
        }

        // stick the merge commit in at the beginning of the next list so we can anchor the previous parent
        commits = []commit{c}
    }

    // chunk the bumps
    var lines []string
    for _, commit := range bumps {
        if m := webconsoleBump.FindStringSubmatch(commit.message); len(m) > 0 {
            webconsole = append(webconsole, m[1])
            continue
        }
        lines = append(lines, commit.message)
    }
    lines = sortAndUniq(lines)
    for _, line := range lines {
        fmt.Printf("- %s\n", line)
    }

    // chunk the upstreams
    lines = nil
    for _, commit := range upstreams {
        lines = append(lines, commit.message)
    }
    lines = sortAndUniq(lines)
    for _, line := range lines {
        fmt.Printf("- %s\n", upstreamLinkify(line))
    }

    if len(webconsole) > 0 {
        fmt.Printf("- web: from %s^..%s\n", webconsole[0], webconsole[len(webconsole)-1])
    }

    for _, apiChange := range apiChanges {
        fmt.Printf("  - %s\n", apiChange)
    }
}

func findPrefixFor(message string, commits []commit) (string, bool) {
    message = strings.ToLower(message)
    for _, m := range assignments {
        if strings.Contains(message, m.term) {
            return m.prefix, true
        }
    }
    for _, c := range commits {
        if prefix, ok := findPrefixFor(c.message, nil); ok {
            return prefix, ok
        }
    }
    return "", false
}

func hasFileChanges(commit string, prefixes ...string) bool {
    out, err := exec.Command("git", "diff", "--name-only", fmt.Sprintf("%s^..%s", commit, commit)).CombinedOutput()
    if err != nil {
        log.Fatal(err)
    }
    for _, file := range strings.Split(string(out), "\n") {
        for _, prefix := range prefixes {
            if strings.HasPrefix(file, prefix) {
                return true
            }
        }
    }
    return false
}

func sortAndUniq(lines []string) []string {
    sort.Strings(lines)
    out := make([]string, 0, len(lines))
    last := ""
    for _, s := range lines {
        if last == s {
            continue
        }
        last = s
        out = append(out, s)
    }
    return out
}

func upstreamLinkify(line string) string {
    if m := upstreamKube.FindStringSubmatch(line); len(m) > 0 {
        return fmt.Sprintf("UPSTREAM: [#%s](https://github.com/kubernetes/kubernetes/pull/%s):%s", m[1], m[1], m[2])
    }
    if m := upstreamRepo.FindStringSubmatch(line); len(m) > 0 {
        return fmt.Sprintf("UPSTREAM: [%s#%s](https://github.com/%s/pull/%s):%s", m[1], m[2], m[1], m[2], m[3])
    }
    return line
}
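The tool takes a FROM and TO revision and writes markdown-style changelog lines to stdout; a hypothetical run between two release tags (the tag names are placeholders):

# summarize merges, bumps, and UPSTREAM picks between two tags
go run tools/changelog/changelog.go v3.6.0 v3.7.0 > CHANGELOG.md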
359
tools/import-verifier/import-verifier.go
Normal file
@@ -0,0 +1,359 @@
package main

import (
    "bytes"
    "encoding/json"
    "fmt"
    "io"
    "io/ioutil"
    "log"
    "os"
    "os/exec"
    "strings"
)

const (
    rootPackage = "github.com/openshift/origin"
)

// Package is a subset of cmd/go.Package
type Package struct {
    ImportPath   string   `json:",omitempty"` // import path of package in dir
    Imports      []string `json:",omitempty"` // import paths used by this package
    TestImports  []string `json:",omitempty"` // imports from TestGoFiles
    XTestImports []string `json:",omitempty"` // imports from XTestGoFiles
}

type ImportRestriction struct {
    // CheckedPackageRoots are the roots of the package tree
    // that are restricted by this configuration
    CheckedPackageRoots []string `json:"checkedPackageRoots"`
    // CheckedPackages are the specific packages
    // that are restricted by this configuration
    CheckedPackages []string `json:"checkedPackages"`
    // IgnoredSubTrees are roots of sub-trees of the
    // BaseImportPath for which we do not want to enforce
    // any import restrictions whatsoever
    IgnoredSubTrees []string `json:"ignoredSubTrees,omitempty"`
    // AllowedImportPackages are specific packages that
    // are allowed to be imported for this restriction
    AllowedImportPackages []string `json:"allowedImportPackages"`
    // AllowedImportPackageRoots are roots of package trees that
    // are allowed to be imported for this restriction
    AllowedImportPackageRoots []string `json:"allowedImportPackageRoots"`
    // ForbiddenImportPackageRoots are roots of package trees that
    // are NOT allowed to be imported for this restriction
    ForbiddenImportPackageRoots []string `json:"forbiddenImportPackageRoots"`
}

// ForbiddenImportsFor determines all of the forbidden
// imports for a package given the import restrictions
func (i *ImportRestriction) ForbiddenImportsFor(pkg Package) []string {
    if !i.isRestrictedPath(pkg.ImportPath) {
        return []string{}
    }
    return i.forbiddenImportsFor(pkg)
}

// isRestrictedPath determines if the import path has
// any restrictions placed on it by this configuration.
// A path will be restricted if:
//   - it falls under the base import path
//   - it does not fall under any of the ignored sub-trees
func (i *ImportRestriction) isRestrictedPath(packageToCheck string) bool {
    // if it's not under our root, then it's a built-in. Everything else is under
    // github.com/openshift/origin or github.com/openshift/origin/vendor
    if !strings.HasPrefix(packageToCheck, rootPackage) {
        return false
    }

    // some subtrees are specifically excluded. Not sure if we still need this given
    // explicit inclusion
    for _, ignored := range i.IgnoredSubTrees {
        if strings.HasPrefix(packageToCheck, ignored) {
            return false
        }
    }

    return true
}

// forbiddenImportsFor determines all of the forbidden
// imports for a package given the import restrictions
// and returns a deduplicated list of them
func (i *ImportRestriction) forbiddenImportsFor(pkg Package) []string {
    forbiddenImportSet := map[string]struct{}{}
    for _, packageToCheck := range append(pkg.Imports, append(pkg.TestImports, pkg.XTestImports...)...) {
        if !i.isAllowed(packageToCheck) {
            forbiddenImportSet[relativePackage(packageToCheck)] = struct{}{}
        }
    }

    var forbiddenImports []string
    for imp := range forbiddenImportSet {
        forbiddenImports = append(forbiddenImports, imp)
    }
    return forbiddenImports
}

// isAllowed determines if an import is allowed,
// which is true when the import is:
//   - of a package outside of the rootPackage
//   - of one of the checked packages or a sub-package of a checked root
//   - of an allowed path or a sub-package of one
func (i *ImportRestriction) isAllowed(packageToCheck string) bool {
    // if it's not under our root, then it's a built-in. Everything else is under
    // github.com/openshift/origin or github.com/openshift/origin/vendor
    if !strings.HasPrefix(packageToCheck, rootPackage) {
        return true
    }
    if i.isIncludedInRestrictedPackages(packageToCheck) {
        return true
    }

    for _, forbiddenPackageRoot := range i.ForbiddenImportPackageRoots {
        if strings.HasPrefix(forbiddenPackageRoot, "vendor") {
            forbiddenPackageRoot = rootPackage + "/" + forbiddenPackageRoot
        }
        if strings.HasPrefix(packageToCheck, forbiddenPackageRoot) {
            return false
        }
    }
    for _, allowedPackage := range i.AllowedImportPackages {
        if strings.HasPrefix(allowedPackage, "vendor") {
            allowedPackage = rootPackage + "/" + allowedPackage
        }
        if packageToCheck == allowedPackage {
            return true
        }
    }
    for _, allowedPackageRoot := range i.AllowedImportPackageRoots {
        if strings.HasPrefix(allowedPackageRoot, "vendor") {
            allowedPackageRoot = rootPackage + "/" + allowedPackageRoot
        }
        if strings.HasPrefix(packageToCheck, allowedPackageRoot) {
            return true
        }
    }

    return false
}

// isIncludedInRestrictedPackages checks to see if a package is included in the list of packages we're
// restricting. Any package being restricted is assumed to be allowed to import another package being
// restricted since they are grouped
func (i *ImportRestriction) isIncludedInRestrictedPackages(packageToCheck string) bool {
    // some subtrees are specifically excluded. Not sure if we still need this given
    // explicit inclusion
    for _, ignored := range i.IgnoredSubTrees {
        if strings.HasPrefix(packageToCheck, ignored) {
            return false
        }
    }

    for _, currBase := range i.CheckedPackageRoots {
        if strings.HasPrefix(packageToCheck, currBase) {
            return true
        }
    }
    for _, currPackageName := range i.CheckedPackages {
        if currPackageName == packageToCheck {
            return true
        }
    }
    return false
}

func relativePackage(absolutePackage string) string {
    if strings.HasPrefix(absolutePackage, rootPackage+"/vendor") {
        return absolutePackage[len(rootPackage)+1:]
    }
    return absolutePackage
}

func main() {
    if len(os.Args) != 2 {
        log.Fatalf("%s requires the configuration file as its only argument", os.Args[0])
    }

    configFile := os.Args[1]
    importRestrictions, err := loadImportRestrictions(configFile)
    if err != nil {
        log.Fatalf("Failed to load import restrictions: %v", err)
    }

    failedRestrictionCheck := false
    for _, restriction := range importRestrictions {
        packages := []Package{}
        for _, currBase := range restriction.CheckedPackageRoots {
            log.Printf("Inspecting imports under %s...\n", currBase)
            currPackages, err := resolvePackage(currBase + "/...")
            if err != nil {
                log.Fatalf("Failed to resolve package tree %v: %v", currBase, err)
            }
            packages = mergePackages(packages, currPackages)
        }
        for _, currPackageName := range restriction.CheckedPackages {
            log.Printf("Inspecting imports at %s...\n", currPackageName)
            currPackages, err := resolvePackage(currPackageName)
            if err != nil {
                log.Fatalf("Failed to resolve package %v: %v", currPackageName, err)
            }
            packages = mergePackages(packages, currPackages)
        }

        if len(packages) == 0 {
            log.Fatalf("No packages found")
        }
        log.Printf("-- validating imports for %d packages in the tree", len(packages))
        for _, pkg := range packages {
            if forbidden := restriction.ForbiddenImportsFor(pkg); len(forbidden) != 0 {
                logForbiddenPackages(relativePackage(pkg.ImportPath), forbidden)
                failedRestrictionCheck = true
            }
        }

        // make sure that all the allowed imports are used
        if unused := unusedPackageImports(restriction.AllowedImportPackages, packages); len(unused) > 0 {
            log.Printf("-- found unused package imports\n")
            for _, unusedPackage := range unused {
                log.Printf("\t%s\n", unusedPackage)
            }
            failedRestrictionCheck = true
        }
        if unused := unusedPackageImportRoots(restriction.AllowedImportPackageRoots, packages); len(unused) > 0 {
            log.Printf("-- found unused package import roots\n")
            for _, unusedPackage := range unused {
                log.Printf("\t%s\n", unusedPackage)
            }
            failedRestrictionCheck = true
        }

        log.Printf("\n")
    }

    if failedRestrictionCheck {
        os.Exit(1)
    }
}

func unusedPackageImports(allowedPackageImports []string, packages []Package) []string {
    ret := []string{}
    for _, allowedImport := range allowedPackageImports {
        if strings.HasPrefix(allowedImport, "vendor") {
            allowedImport = rootPackage + "/" + allowedImport
        }
        found := false
        for _, pkg := range packages {
            for _, packageToCheck := range append(pkg.Imports, append(pkg.TestImports, pkg.XTestImports...)...) {
                if packageToCheck == allowedImport {
                    found = true
                    break
                }
            }
        }
        if !found {
            ret = append(ret, relativePackage(allowedImport))
        }
    }

    return ret
}

func unusedPackageImportRoots(allowedPackageImportRoots []string, packages []Package) []string {
    ret := []string{}
    for _, allowedImportRoot := range allowedPackageImportRoots {
        if strings.HasPrefix(allowedImportRoot, "vendor") {
            allowedImportRoot = rootPackage + "/" + allowedImportRoot
        }
        found := false
        for _, pkg := range packages {
            for _, packageToCheck := range append(pkg.Imports, append(pkg.TestImports, pkg.XTestImports...)...) {
                if strings.HasPrefix(packageToCheck, allowedImportRoot) {
                    found = true
                    break
                }
            }
        }
        if !found {
            ret = append(ret, relativePackage(allowedImportRoot))
        }
    }

    return ret
}

func mergePackages(existingPackages, currPackages []Package) []Package {
    for _, currPackage := range currPackages {
        found := false
        for _, existingPackage := range existingPackages {
            if existingPackage.ImportPath == currPackage.ImportPath {
                log.Printf("-- Skipping: %v", currPackage.ImportPath)
                found = true
            }
        }
        if !found {
            // this was super noisy.
            //log.Printf("-- Adding: %v", currPackage.ImportPath)
            existingPackages = append(existingPackages, currPackage)
        }
    }

    return existingPackages
}

func loadImportRestrictions(configFile string) ([]ImportRestriction, error) {
    config, err := ioutil.ReadFile(configFile)
    if err != nil {
        return nil, fmt.Errorf("failed to load configuration from %s: %v", configFile, err)
    }

    var importRestrictions []ImportRestriction
    if err := json.Unmarshal(config, &importRestrictions); err != nil {
        return nil, fmt.Errorf("failed to unmarshal from %s: %v", configFile, err)
    }

    return importRestrictions, nil
}

func resolvePackage(targetPackage string) ([]Package, error) {
    cmd := "go"
    args := []string{"list", "-json", targetPackage}
    stdout, err := exec.Command(cmd, args...).Output()
    if err != nil {
        return nil, fmt.Errorf("failed to run `%s %s`: %v", cmd, strings.Join(args, " "), err)
    }

    packages, err := decodePackages(bytes.NewReader(stdout))
    if err != nil {
        return nil, fmt.Errorf("failed to decode packages: %v", err)
    }

    return packages, nil
}

func decodePackages(r io.Reader) ([]Package, error) {
    // `go list -json` concatenates package definitions
    // instead of emitting a single valid JSON document, so we
    // need to stream the output to decode it into the
    // data we are looking for instead of just using a
    // simple JSON decoder on stdout
    var packages []Package
    decoder := json.NewDecoder(r)
    for decoder.More() {
        var pkg Package
        if err := decoder.Decode(&pkg); err != nil {
            return nil, fmt.Errorf("invalid package: %v", err)
        }
        packages = append(packages, pkg)
    }

    return packages, nil
}

func logForbiddenPackages(base string, forbidden []string) {
    log.Printf("-- found forbidden imports for %s:\n", base)
    for _, forbiddenPackage := range forbidden {
        log.Printf("\t%s\n", forbiddenPackage)
    }
}
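To show how the ImportRestriction fields map onto the JSON that verify-imports.sh feeds this tool, here is a hypothetical configuration (all package paths are illustrative, not the repository's real rules):

# write an illustrative restriction config and run the verifier against it
cat > /tmp/import-restrictions.json <<'EOF'
[
  {
    "checkedPackageRoots": ["github.com/openshift/origin/pkg/image"],
    "allowedImportPackageRoots": ["vendor/k8s.io/apimachinery"],
    "forbiddenImportPackageRoots": ["vendor/github.com/docker/distribution"]
  }
]
EOF
import-verifier /tmp/import-restrictions.json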