diff --git a/.pre-commit-config.yaml b/.pre-commit-config.yaml index 69a1e39f90a..b9df51a76fb 100644 --- a/.pre-commit-config.yaml +++ b/.pre-commit-config.yaml @@ -1,23 +1,19 @@ repos: -- repo: https://github.com/ambv/black - rev: stable - hooks: - - id: black - args: [--diff] - language_version: python3 - - id: black - name: black-format - language_version: python3 - stages: [manual] -- repo: https://github.com/pre-commit/pre-commit-hooks - rev: v1.3.0 - hooks: - - id: flake8 -- repo: local - hooks: - - id: pylint - name: pylint - entry: pylint - args: [-E, -j4, --disable=invalid-sequence-index, --disable=no-member, --disable=no-name-in-module] - language: system - types: [python] +- repo: https://github.com/python/black + rev: stable + hooks: + - id: black + args: [--diff, --check] + language_version: python3.6 +- repo: https://gitlab.com/pycqa/flake8 + rev: 3.7.7 + hooks: + - id: flake8 +- repo: local + hooks: + - id: pylint + name: pylint + entry: pylint + args: [-E, -j4, --disable=invalid-sequence-index, --disable=no-member, --disable=no-name-in-module] + language: system + types: [python] diff --git a/frozen_requirements.txt b/frozen_requirements.txt index 3617fe07af2..750bba6c877 100644 --- a/frozen_requirements.txt +++ b/frozen_requirements.txt @@ -82,7 +82,7 @@ pathlib2==2.3.3 pefile==2018.8.8 pkginfo==1.2.1 pluggy==0.8.1 -pre-commit==1.14.4 +pre-commit==1.16.1 prettytable==0.7.2 py==1.7.0 pyasn1==0.4.5 diff --git a/pyproject.toml b/pyproject.toml index 71e7a19569b..4ef86121cf5 100644 --- a/pyproject.toml +++ b/pyproject.toml @@ -1,3 +1,3 @@ [tool.black] line-length = 100 -py36 = true +target-version = ["py36"] diff --git a/run_container.sh b/run_container.sh new file mode 120000 index 00000000000..61a58b02046 --- /dev/null +++ b/run_container.sh @@ -0,0 +1 @@ +test.sh \ No newline at end of file diff --git a/test.sh b/test.sh index 2d2737e0c01..de61a622dff 100755 --- a/test.sh +++ b/test.sh @@ -1,394 +1,541 @@ #!/usr/bin/env bash -# Verifies environment 
and launches docker to execute test_runner.sh -# 1. I can pick up a brand new laptop, and as long as I have docker installed, everything will just work if I do ./test.sh -# 2. I want test.sh to default to running _all_ tests for that framework. -# 3. I want to be able to pass -m or -k to pytest -# 4. If I pass `all` instead of a fw name, it will run all frameworks -# 5. test.sh should validate i have the AWS keys, and a CLUSTER_URL set, but it need not verify the azure keys / security / etc +# Sets the stage to run arbitrary commands (most importantly test_runner.sh) in +# a Docker container using by default the mesosphere/dcos-commons Docker image. + +# Will run any commands specified in the DOCKER_COMMAND environment variable. +# e.g.: +# $ DOCKER_COMMAND=ls ./test.sh + +# Goals: +# 1. I can pick up a brand new laptop, and as long as I have Docker installed, +# everything will just work if I run "./test.sh $framework". +# +# 2. I want test.sh to default to running all tests for that framework. +# +# 3. I want to be able to pass -m or -k to pytest. +# +# 4. If I pass "all" instead of a framework name, it will run all frameworks. +# +# 5. This script should validate that AWS credentials exist, and a CLUSTER_URL +# set, but it need not verify the azure keys / security / etc. + +set -eo pipefail + +readonly REQUIREMENTS='docker' + +for requirement in ${REQUIREMENTS}; do + if ! [[ -x $(command -v "${requirement}") ]]; then + echo "You need to install '${requirement}' to run this script" + exit 1 + fi +done -# Exit immediately on errors -set -e +if [ "${1//-/}" == "help" ] || [ "${1//-/}" == "h" ]; then + help="true" +fi timestamp="$(date +%y%m%d-%H%M%S)" -# Create a temp file for docker env. -# When the script exits (successfully or otherwise), clean up the file automatically. 
-tmp_aws_creds_path="$(mktemp /tmp/sdk-test-creds-${timestamp}-XXXX.tmp)" -envfile="$(mktemp /tmp/sdk-test-env-${timestamp}-XXXX.tmp)" +tmp_aws_credentials_file="$(mktemp "/tmp/dcos-commons-aws-credentials-${timestamp}-XXXX.tmp")" +env_file="$(mktemp "/tmp/dcos-commons-env-file-${timestamp}-XXXX.tmp")" + function cleanup { - rm -f ${tmp_aws_creds_path} - rm -f ${envfile} + rm -f "${tmp_aws_credentials_file}" + rm -f "${env_file}" } trap cleanup EXIT -REPO_ROOT_DIR=${REPO_ROOT_DIR:="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )"} -WORK_DIR="/build" # where REPO_ROOT_DIR is mounted within the image - -# Find out what framework(s) are available. -# - If there's a /frameworks directory, get values from there. -# - Otherwise just use the name of the repo directory. -# If there's multiple options, the user needs to pick one. If there's only one option then we'll use that automatically. -if [ -d $REPO_ROOT_DIR/frameworks ]; then - # mono-repo (e.g. dcos-commons) - FRAMEWORK_LIST=$(ls $REPO_ROOT_DIR/frameworks | sort | xargs echo -n) -else - # standalone repo (e.g. spark-build) - FRAMEWORK_LIST=$(basename ${REPO_ROOT_DIR}) -fi - - -if [ -n "$AZURE_DEV_CLIENT_ID" -a -n "$AZURE_DEV_CLIENT_SECRET" -a \ - -n "$AZURE_DEV_TENANT_ID" -a -n "$AZURE_DEV_STORAGE_ACCOUNT" -a \ - -n "$AZURE_DEV_STORAGE_KEY" ]; then - azure_enabled="true" +DCOS_COMMONS_DIRECTORY=${DCOS_COMMONS_DIRECTORY:="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)"} +# Where $DCOS_COMMONS_DIRECTORY is mounted in the container. +# +# If this script is run with the '--project' flag, the project's "frameworks" +# directory will be mounted at '$WORK_DIR/frameworks' in the container. 
+WORK_DIR="/build" + +################################################################################ +#################### Default values for CLI parameters ######################### +################################################################################ + +if [ -n "${AZURE_DEV_CLIENT_ID}" ] \ + && [ -n "${AZURE_DEV_CLIENT_SECRET}" ] \ + && [ -n "${AZURE_DEV_TENANT_ID}" ] \ + && [ -n "${AZURE_DEV_STORAGE_ACCOUNT}" ] \ + && [ -n "${AZURE_DEV_STORAGE_KEY}" ]; then + azure_enabled="true" fi -# Set default values security="permissive" -if [ -n "$azure_enabled" ]; then - pytest_m="sanity" +if [ -n "${azure_enabled}" ]; then + pytest_m="sanity" else - pytest_m="sanity and not azure" + pytest_m="sanity and not azure" fi -gradle_cache="${REPO_ROOT_DIR}/.gradle_cache" +gradle_cache="${DCOS_COMMONS_DIRECTORY}/.gradle_cache" ssh_path="${HOME}/.ssh/ccm.pem" ssh_user="core" -aws_creds_path="${HOME}/.aws/credentials" +aws_credentials_path="${HOME}/.aws/credentials" enterprise="true" headless="false" interactive="false" package_registry="false" -docker_command=${DOCKER_COMMAND:="bash /build-tools/test_runner.sh $WORK_DIR"} -docker_image=${DOCKER_IMAGE:-"mesosphere/dcos-commons:latest"} +docker_options="${DOCKER_OPTIONS:=}" +docker_command="${DOCKER_COMMAND:=bash ${WORK_DIR}/tools/ci/test_runner.sh ${WORK_DIR}}" +docker_image="${DOCKER_IMAGE:-mesosphere/dcos-commons:latest}" env_passthrough= -envfile_input= +env_file_input= + +################################################################################ +################################ CLI usage ##################################### +################################################################################ function usage() { - echo "Usage: $0 [flags] [framework:$(echo $FRAMEWORK_LIST | sed 's/ /,/g')]" - echo "" - echo "Flags:" - echo " -m $pytest_m" - echo " -k " - echo " Test filters passed through to pytest. Other arguments may be passed with PYTEST_ARGS." 
- echo " -s" - echo " Using a strict mode cluster: configure/use ACLs." - echo " -o" - echo " Using an Open DC/OS cluster: skip Enterprise-only features." - echo " -p $ssh_path" - echo " Path to cluster SSH key." - echo " -l $ssh_user" - echo " Username to use for SSH commands into the cluster." - echo " -e $env_passthrough" - echo " A comma-separated list of environment variables to pass through to the running docker container" - echo " --envfile $envfile_input" - echo " A path to an envfile to pass to the docker container in addition to those required by the test scripts" - echo " -i/--interactive" - echo " Open a shell prompt in the docker container, without actually running any tests. Equivalent to DOCKER_COMMAND=bash" - echo " --headless" - echo " Run docker command in headless mode, without attaching to stdin. Sometimes needed in CI." - echo " --package-registry" - echo " Enables using a package registry to install packages. Works in 1.12.1 and above only." - echo " --dcos-files-path DIR" - echo " Sets the directory to look for .dcos files. If empty, uses stub universe urls to build .dcos file(s)." - echo " --gradle-cache $gradle_cache" - echo " Sets the gradle build cache to the specified path. Setting this to \"\" disables the cache." - echo " -a/--aws $aws_creds_path" - echo " Path to an AWS credentials file. Overrides any AWS_* env credentials." - echo " --aws-profile ${AWS_PROFILE:=NAME}" - echo " The AWS profile to use. Only required when using an AWS credentials file with multiple profiles." - echo "" - echo "Environment:" - echo " CLUSTER_URL" - echo " URL to cluster. If unset then a cluster will be created using dcos-launch" - echo " STUB_UNIVERSE_URL" - echo " One or more comma-separated stub-universe URLs. If unset then a build will be performed internally." - echo " DCOS_LOGIN_USERNAME/DCOS_LOGIN_PASSWORD" - echo " Custom login credentials to use for the cluster." 
- echo " AZURE_[CLIENT_ID,CLIENT_SECRET,TENANT_ID,STORAGE_ACCOUNT,STORAGE_KEY]" - echo " Enables Azure tests. The -m default is automatically updated to include any Azure tests." - echo " AWS_ACCESS_KEY_ID/AWS_SECRET_ACCESS_KEY or AWS_DEV_ACCESS_KEY_ID/AWS_DEV_SECRET_ACCESS_KEY" - echo " AWS credentials to use if the credentials file is unavailable." - echo " S3_BUCKET" - echo " S3 bucket to use for testing." - echo " DOCKER_COMMAND=$docker_command" - echo " Command to be run within the docker image (e.g. 'DOCKER_COMMAND=bash' to just get a prompt)" - echo " REPO_ROOT_DIR=${REPO_ROOT_DIR}" - echo " Allows for overriding the location of the repository's root directory. Autodetected by default." - echo " Must be an absolute path." - echo " PYTEST_ARGS" - echo " Additional arguments (other than -m or -k) to pass to pytest." - echo " TEST_SH_*" - echo " Anything starting with TEST_SH_* will be forwarded to the container with that prefix removed." - echo " For example, 'TEST_SH_FOO=BAR' is included as 'FOO=BAR'." + set +x + echo "Usage: $0 [flags] [framework:${FRAMEWORK_LIST// /,}]" + echo + echo "Parameters:" + echo " --project ${project}" + echo " Full path to a 'dcos-commons'-based project's directory. E.g.: /path/to/dcos-kafka-service, /path/to/dcos-elastic-service" + echo + echo " -m ${pytest_m}" + echo " Only run tests matching given mark expression. Example: -m 'sanity and not azure'" + echo + echo " -k " + echo " Only run tests which match the given substring expression. Example : -k 'test_tls and not test_tls_soak'" + echo + echo " -s" + echo " Using a strict mode cluster: configure/use ACLs." + echo + echo " -o" + echo " Using an Open DC/OS cluster: skip Enterprise-only features." + echo + echo " -p ${ssh_path}" + echo " Path to cluster SSH key." + echo + echo " -l ${ssh_user}" + echo " Username to use for SSH commands into the cluster." 
+ echo + echo " -e ${env_passthrough}" + echo " A comma-separated list of environment variables to pass through to the running docker container" + echo + echo " --env_file ${env_file_input}" + echo " A path to an env_file to pass to the docker container in addition to those required by the test scripts" + echo + echo " -i/--interactive" + echo " Open a shell prompt in the docker container, without actually running any tests. Equivalent to DOCKER_COMMAND=bash" + echo + echo " --headless" + echo " Run docker command in headless mode, without attaching to stdin. Sometimes needed in CI." + echo + echo " --package-registry" + echo " Enables using a package registry to install packages. Works in 1.12.1 and above only." + echo + echo " --dcos-files-path DIR" + echo " Sets the directory to look for .dcos files. If empty, uses stub universe urls to build .dcos file(s)." + echo + echo " --gradle-cache ${gradle_cache}" + echo " Sets the gradle build cache to the specified path. Setting this to \"\" disables the cache." + echo + echo " -a/--aws ${aws_credentials_path}" + echo " Path to an AWS credentials file. Overrides any AWS_* env credentials." + echo + echo " --aws-profile ${AWS_PROFILE:=NAME}" + echo " The AWS profile to use. Only required when using an AWS credentials file with multiple profiles." + echo + echo "---" + echo + echo "Environment variables:" + echo " CLUSTER_URL" + echo " URL to cluster. If unset then a cluster will be created using dcos-launch" + echo " STUB_UNIVERSE_URL" + echo " One or more comma-separated stub-universe URLs. If unset then a build will be performed internally." + echo " DCOS_LOGIN_USERNAME/DCOS_LOGIN_PASSWORD" + echo " Custom login credentials to use for the cluster." + echo " AZURE_{CLIENT_ID,CLIENT_SECRET,TENANT_ID,STORAGE_ACCOUNT,STORAGE_KEY}" + echo " Enables Azure tests. The -m default is automatically updated to include any Azure tests." 
+ echo " AWS_ACCESS_KEY_ID/AWS_SECRET_ACCESS_KEY or AWS_DEV_ACCESS_KEY_ID/AWS_DEV_SECRET_ACCESS_KEY" + echo " AWS credentials to use if the credentials file is unavailable." + echo " S3_BUCKET" + echo " S3 bucket to use for testing." + echo " DOCKER_COMMAND=${docker_command}" + echo " Command to be run within the docker image (e.g. 'DOCKER_COMMAND=bash' to just get a prompt)" + echo " DCOS_COMMONS_DIRECTORY=${DCOS_COMMONS_DIRECTORY}" + echo " Allows for overriding the location of the repository's root directory. Autodetected by default." + echo " Must be an absolute path." + echo " PYTEST_ARGS" + echo " Additional arguments (other than -m or -k) to pass to pytest." + echo " TEST_SH_*" + echo " Anything starting with TEST_SH_* will be forwarded to the container with that prefix removed." + echo " For example, 'TEST_SH_FOO=BAR' is included as 'FOO=BAR'." } -if [ x"${1//-/}" == x"help" -o x"${1//-/}" == x"h" ]; then - usage - exit 1 +if [ "${help}" == "true" ]; then + usage + exit 1 fi +################################################################################ +############################ Parse CLI parameters ############################## +################################################################################ + framework="" +project="" + +while [[ ${#} -gt 0 ]]; do + arg="${1}" -while [[ $# -gt 0 ]]; do -key="$1" -case $key in + case "${arg}" in + --project) + if [[ ! -d "${2}" ]]; then echo "'${arg}' takes a directory. '${2}' is not a directory"; exit 1; fi + project="${2}" + shift + ;; -m) - pytest_m="$2" - shift - ;; + pytest_m="${2}" + shift + ;; -k) - pytest_k="$2" - shift - ;; + pytest_k="${2}" + shift + ;; -s) - security="strict" - ;; + security="strict" + ;; -o|--open) - enterprise="false" - ;; + enterprise="false" + ;; -p) - if [[ ! -f "$2" ]]; then echo "File not found: -p $2"; exit 1; fi - ssh_path="$2" - shift - ;; + if [[ ! 
-f "${2}" ]]; then echo "File not found: ${arg} ${2}"; exit 1; fi + ssh_path="${2}" + shift + ;; -l) - ssh_user="$2" - shift - ;; + ssh_user="${2}" + shift + ;; -e) - env_passthrough="$2" - shift - ;; - --envfile) - if [[ ! -f "$2" ]]; then echo "File not found: $key $2"; exit 1; fi - envfile_input="$2" - shift - ;; + env_passthrough="${2}" + shift + ;; + --env_file) + if [[ ! -f "${2}" ]]; then echo "File not found: ${arg} ${2}"; exit 1; fi + env_file_input="${2}" + shift + ;; -i|--interactive) - if [[ x"$headless" == x"true" ]]; then echo "Cannot enable both --headless and --interactive: Disallowing background prompt that runs forever."; exit 1; fi - interactive="true" - ;; + if [[ "${headless}" == "true" ]]; then echo "Cannot enable both --headless and --interactive: Disallowing background prompt that runs forever."; exit 1; fi + interactive="true" + ;; --headless) - if [[ x"$interactive" == x"true" ]]; then echo "Cannot enable both --headless and --interactive: Disallowing background prompt that runs forever."; exit 1; fi - headless="true" - ;; + if [[ "${interactive}" == "true" ]]; then echo "Cannot enable both --headless and --interactive: Disallowing background prompt that runs forever."; exit 1; fi + headless="true" + ;; --package-registry) - package_registry="true" - ;; + package_registry="true" + ;; --dcos-files-path) - if [[ ! -d "$2" ]]; then echo "Directory not found: --dcos-files-path $2"; exit 1; fi - # Resolve abs path: - dcos_files_path="$( cd "$( dirname "$2" )" && pwd )/$(basename "$2")" - shift - ;; + if [[ ! -d "${2}" ]]; then echo "Directory not found: ${arg} ${2}"; exit 1; fi + # Resolve abs path: + dcos_files_path="$(cd "$(dirname "${2}")" && pwd)/$(basename "${2}")" + shift + ;; --gradle-cache) - if [[ ! -d "$2" ]]; then echo "Directory not found: --gradle-cache $2"; exit 1; fi - gradle_cache="$2" - shift - ;; + if [[ ! 
-d "${2}" ]]; then echo "Directory not found: ${arg} ${2}"; exit 1; fi + gradle_cache="${2}" + shift + ;; -a|--aws) - if [[ ! -f "$2" ]]; then echo "File not found: -a/--aws $2"; exit 1; fi - aws_creds_path="$2" - shift - ;; + if [[ ! -f "${2}" ]]; then echo "File not found: ${arg} ${2}"; exit 1; fi + aws_credentials_path="${2}" + shift + ;; --aws-profile) - aws_profile="$2" - shift - ;; + aws_profile="${2}" + shift + ;; -*) - echo "Unknown option: $key" - usage - exit 1 - ;; + echo "Unknown option: ${arg}" + usage + exit 1 + ;; *) - if [[ -n "$framework" ]]; then echo "Multiple frameworks specified, please only specify one at a time: $framework $@"; exit 1; fi - framework=$key - ;; -esac -shift # past argument or value + if [[ -n "${framework}" ]]; then echo "Multiple frameworks specified, please only specify one at a time: ${framework} ${*}"; exit 1; fi + framework="${arg}" + ;; + esac + + shift done -if [ -z "$framework" -a x"$interactive" != x"true" -a x"$DOCKER_COMMAND" == x"" ]; then - # If FRAMEWORK_LIST only has one option, use that. Otherwise complain. - if [ $(echo $FRAMEWORK_LIST | wc -w) == 1 ]; then - framework=$FRAMEWORK_LIST - else - echo "Multiple frameworks in $(basename $REPO_ROOT_DIR)/frameworks/, please specify one to test: $FRAMEWORK_LIST" - exit 1 - fi -elif [ "$framework" = "all" ]; then - echo "'all' is no longer supported. 
Please specify one framework to test: $FRAMEWORK_LIST" - exit 1 +################################################################################ +###################### Configure Docker container options ###################### +################################################################################ + +if [ -n "${gradle_cache}" ]; then + echo "Setting Gradle cache to ${gradle_cache}" + container_volumes="${container_volumes} -v ${gradle_cache}:/root/.gradle" fi -volume_args="-v ${REPO_ROOT_DIR}:$WORK_DIR" +if [ "${interactive}" == "true" ]; then + docker_command="bash" +fi -if [ -z "$CLUSTER_URL" -a x"$interactive" == x"true" ]; then - CLUSTER_URL="$(dcos config show core.dcos_url)" - echo "CLUSTER_URL not specified. Using attached cluster ${CLUSTER_URL} in interactive mode" +# Some automation contexts (e.g. Jenkins) will be unhappy if STDIN is not +# available. The --headless command accommodates such contexts. +if [ "${headless}" != "true" ]; then + docker_interactive_arg="-i" fi -# Configure SSH key for getting into the cluster during tests -if [ -f "$ssh_path" ]; then - volume_args="$volume_args -v $ssh_path:/ssh/key" # pass provided key into docker env -else - if [ -n "$CLUSTER_URL" ]; then - # If the user is providing us with a cluster, we require the SSH key for that cluster. - echo "SSH key not found at $ssh_path. Use -p to customize this path." 
- echo "An SSH key is required for communication with the provided CLUSTER_URL=$CLUSTER_URL" - exit 1 - fi - # Don't need ssh key now: test_runner.sh will extract the key after cluster launch +if [ -n "${dcos_files_path}" ]; then + container_volumes="${container_volumes} -v ${dcos_files_path}:${dcos_files_path}" fi -# Configure the AWS credentials profile -if [ -n "${aws_profile}" ]; then - echo "Using provided --aws-profile: ${aws_profile}" -elif [ -n "$AWS_PROFILE" ]; then - echo "Using provided AWS_PROFILE: $AWS_PROFILE" - aws_profile=$AWS_PROFILE -elif [ -f "${aws_creds_path}" ]; then - # Check the creds file. If there's exactly one profile, then use that profile. - available_profiles=$(grep -oE '^\[\S+\]' $aws_creds_path | tr -d '[]') # find line(s) that look like "[profile]", remove "[]" - available_profile_count=$(echo "$available_profiles" | wc -l) - if [ "$available_profile_count" == "1" ]; then - aws_profile=$available_profiles - echo "Using sole profile in $aws_creds_path: $aws_profile" - else - echo "Expected 1 profile in $aws_creds_path, found $available_profile_count: ${available_profiles}" - echo "Please specify --aws-profile or \$AWS_PROFILE to select a profile" - exit 1 - fi -else - echo "No AWS profile specified, using 'default'" - aws_profile="default" +################################################################################ +################################### pytest ##################################### +################################################################################ + +if [ -n "${pytest_k}" ]; then + if [ -n "${PYTEST_ARGS}" ]; then + PYTEST_ARGS="${PYTEST_ARGS} " + fi + PYTEST_ARGS="${PYTEST_ARGS}-k \"${pytest_k}\"" fi +if [ -n "${pytest_m}" ]; then + if [ -n "${PYTEST_ARGS}" ]; then + PYTEST_ARGS="${PYTEST_ARGS} " + fi + PYTEST_ARGS="${PYTEST_ARGS}-m \"${pytest_m}\"" +fi + +################################################################################ +######### Set up "project", "framework" and "cluster" related 
variables ######## +################################################################################ + +# Set project root, defaulting to the dcos-commons directory. +PROJECT_ROOT="${project:-${DCOS_COMMONS_DIRECTORY}}" -# Write the AWS credential file (deleted on script exit) -if [ -f "${aws_creds_path}" ]; then - aws_credential_file_mount_target="${aws_creds_path}" +# Find out which framework(s) are available. +# +# - If there's a "$PROJECT_ROOT/frameworks" directory, get its children. +# - Otherwise just use the name of the repo directory. +# +# If there's multiple options, the user needs to pick one. If there's only one +# option then we'll use that automatically. +if [ -d "${PROJECT_ROOT}/frameworks" ]; then + FRAMEWORK_LIST=$(find "${PROJECT_ROOT}/frameworks" -maxdepth 1 -mindepth 1 -type d | sort | xargs echo -n) else - # CI environments may have creds in AWS_DEV_* envvars, map them to AWS_*: - if [ -n "${AWS_DEV_ACCESS_KEY_ID}" -a -n "${AWS_DEV_SECRET_ACCESS_KEY}}" ]; then - AWS_ACCESS_KEY_ID="${AWS_DEV_ACCESS_KEY_ID}" - AWS_SECRET_ACCESS_KEY="${AWS_DEV_SECRET_ACCESS_KEY}" - fi - # Check AWS_* envvars for credentials, create temp creds file using those credentials: - if [ -n "${AWS_ACCESS_KEY_ID}" -a -n "${AWS_SECRET_ACCESS_KEY}}" ]; then - echo "Writing AWS env credentials to temporary file: $tmp_aws_creds_path" - cat > $tmp_aws_creds_path <> "${env_file}" <<-EOF + GIT_DIR=${container_dcos_commons_git_dir} + GIT_WORK_TREE=${container_dcos_commons_git_dir} + EOF fi -# Some automation contexts (e.g. Jenkins) will be unhappy if STDIN is not available. The --headless command accomodates such contexts. -if [ x"$headless" != x"true" ]; then - docker_interactive_arg="-i" +if [ -z "${CLUSTER_URL}" ] && [ "${interactive}" == "true" ]; then + CLUSTER_URL="$(dcos config show core.dcos_url)" + echo "CLUSTER_URL not specified. 
Using attached cluster '${CLUSTER_URL}' in interactive mode" fi -if [ -n "$pytest_k" ]; then - if [ -n "$PYTEST_ARGS" ]; then - PYTEST_ARGS="$PYTEST_ARGS " - fi - PYTEST_ARGS="$PYTEST_ARGS-k \"$pytest_k\"" +################################################################################ +######################### Configure cluster SSH key ############################ +################################################################################ + +if [ -f "${ssh_path}" ]; then + container_volumes="${container_volumes} -v ${ssh_path}:/ssh/key" +else + if [ -n "${CLUSTER_URL}" ]; then + # If the user is providing us with a cluster, we require the SSH key for that cluster. + echo "SSH key not found at '${ssh_path}'. Use -p /path/to/id_rsa to customize this path." + echo "An SSH key is required for communication with the provided CLUSTER_URL=${CLUSTER_URL}" + exit 1 + fi + # Don't need ssh key now: test_runner.sh will extract the key after cluster + # launch. fi -if [ -n "$pytest_m" ]; then - if [ -n "$PYTEST_ARGS" ]; then - PYTEST_ARGS="$PYTEST_ARGS " - fi - PYTEST_ARGS="$PYTEST_ARGS-m \"$pytest_m\"" + +################################################################################ +###################### Configure AWS credentials profile ####################### +################################################################################ + +if [ -n "${aws_profile}" ]; then + echo "Using provided --aws-profile: ${aws_profile}" +elif [ -n "${AWS_PROFILE}" ]; then + echo "Using provided AWS_PROFILE: ${AWS_PROFILE}" + aws_profile="${AWS_PROFILE}" +elif [ -f "${aws_credentials_path}" ]; then + # If the credentials file has exactly one profile, use that profile. + available_profiles="$(grep -oE '^\[\S+\]' "${aws_credentials_path}" | tr -d '[]')" # Find line(s) that look like "[profile]", remove "[]". 
+ available_profile_count="$(echo "${available_profiles}" | wc -l)" + if [ "${available_profile_count}" == "1" ]; then + aws_profile="${available_profiles}" + echo "Using sole profile in ${aws_credentials_path}: ${aws_profile}" + else + echo "Expected 1 profile in '${aws_credentials_path}', found ${available_profile_count}: ${available_profiles}" + echo "Please specify --aws-profile or \$AWS_PROFILE to select a profile" + exit 1 + fi +else + echo "No AWS profile specified, using 'default'" + aws_profile="default" fi -if [ -n "$dcos_files_path" ]; then - volume_args="$volume_args -v ${dcos_files_path}:${dcos_files_path}" +if [ -f "${aws_credentials_path}" ]; then + aws_credentials_file_mount_source="${aws_credentials_path}" +else + # CI environments may have AWS credentials in AWS_DEV_* envvars, map them to + # AWS_*. + if [ -n "${AWS_DEV_ACCESS_KEY_ID}" ] \ + && [ -n "${AWS_DEV_SECRET_ACCESS_KEY}" ]; then + AWS_ACCESS_KEY_ID="${AWS_DEV_ACCESS_KEY_ID}" + AWS_SECRET_ACCESS_KEY="${AWS_DEV_SECRET_ACCESS_KEY}" + fi + + # Check AWS_* envvars for credentials, create temp creds file using those + # credentials. + if [ -n "${AWS_ACCESS_KEY_ID}" ] && [ -n "${AWS_SECRET_ACCESS_KEY}" ]; then + echo "Writing AWS credentials to temporary file: ${tmp_aws_credentials_file}" + cat > "${tmp_aws_credentials_file}" <<-EOF + [${aws_profile}] + aws_access_key_id = ${AWS_ACCESS_KEY_ID} + aws_secret_access_key = ${AWS_SECRET_ACCESS_KEY} + EOF + else + echo "Missing AWS credentials file (${aws_credentials_path}) and AWS env (AWS_ACCESS_KEY_ID/AWS_SECRET_ACCESS_KEY)" + exit 1 + fi + + aws_credentials_file_mount_source="${tmp_aws_credentials_file}" fi -if [ -n "$TEAMCITY_VERSION" ]; then - # The teamcity python module treats present-but-empty as enabled. - # We must therefore completely omitted this envvar to disable teamcity handling. 
- echo "TEAMCITY_VERSION=\"${TEAMCITY_VERSION}\"" >> $envfile +container_volumes="${container_volumes} -v ${aws_credentials_file_mount_source}:/root/.aws/credentials:ro" + +################################################################################ +############################# Build ENV file ################################### +################################################################################ + +if [ -n "${TEAMCITY_VERSION}" ]; then + # The teamcity python module treats present-but-empty as enabled. We must + # therefore completely omit this envvar to disable teamcity handling. + echo "TEAMCITY_VERSION=\"${TEAMCITY_VERSION}\"" >> "${env_file}" fi -if [ -n "$azure_enabled" ]; then - cat >> $envfile <> "${env_file}" <<-EOF + AZURE_CLIENT_ID="${AZURE_DEV_CLIENT_ID}" + AZURE_CLIENT_SECRET="${AZURE_DEV_CLIENT_SECRET}" + AZURE_TENANT_ID="${AZURE_DEV_TENANT_ID}" + AZURE_STORAGE_ACCOUNT="${AZURE_DEV_STORAGE_ACCOUNT}" + AZURE_STORAGE_KEY="${AZURE_DEV_STORAGE_KEY}" + EOF fi -cat >> $envfile <> "${env_file}" <<-EOF + AWS_PROFILE=${aws_profile} + CLUSTER_URL=${CLUSTER_URL} + DCOS_ENTERPRISE=${enterprise} + DCOS_FILES_PATH=${dcos_files_path} + DCOS_LOGIN_PASSWORD=${DCOS_LOGIN_PASSWORD} + DCOS_LOGIN_USERNAME=${DCOS_LOGIN_USERNAME} + DCOS_SSH_USERNAME=${ssh_user} + FRAMEWORK=${framework} + PACKAGE_REGISTRY_ENABLED=$package_registry + PYTEST_ARGS=${PYTEST_ARGS} + PYTHONPATH=${WORK_DIR}/testing + S3_BUCKET=${S3_BUCKET} + SECURITY=${security} + STUB_UNIVERSE_URL=${STUB_UNIVERSE_URL} EOF -while read line; do - # Prefix match, then strip prefix in envfile: - if [[ "${line:0:8}" = "TEST_SH_" ]]; then - echo ${line#TEST_SH_} >> $envfile - fi +while read -r line; do + # Prefix match, then strip prefix in env_file. 
+ if [[ "${line:0:8}" = "TEST_SH_" ]]; then + echo "${line#TEST_SH_}" >> "${env_file}" + fi done < <(env) -if [ -n "$env_passthrough" ]; then - # If the -e flag is specified, add the ENVVAR lines for the - # comma-separated list of envvars - for envvar_name in ${env_passthrough//,/ }; do - echo "$envvar_name" >> $envfile - done +if [ -n "${env_passthrough}" ]; then + # If the -e flag is specified, add the ENVVAR lines for the comma-separated + # list of envvars. + for envvar_name in ${env_passthrough//,/ }; do + echo "${envvar_name}" >> "${env_file}" + done fi -if [ -n "$envfile_input" ]; then - cat "${envfile_input}" >> $envfile +if [ -n "${env_file_input}" ]; then + cat "${env_file_input}" >> "${env_file}" fi -CMD="docker run --rm \ --t \ -${docker_interactive_arg} \ ---env-file $envfile \ -${volume_args} \ --w $WORK_DIR \ -${docker_image} \ -${docker_command}" +################################################################################ +######################### Prepare and run command ############################## +################################################################################ + +docker pull "${docker_image}" + +set +x + +CMD="docker run + --rm + -t + ${docker_interactive_arg} + --env-file ${env_file} + -w ${WORK_DIR} + ${container_volumes} + ${docker_options} + ${docker_image} + ${docker_command}" -echo "===" +echo "================================================================================" echo "Docker command:" -echo " $CMD" -echo "" -echo "Environment:" -while read line; do - echo " $line" -done <$envfile -echo "===" - -$CMD +# shellcheck disable=SC2001 +# https://github.com/koalaman/shellcheck/wiki/SC2001 +echo -e " $(echo "${CMD}" | sed 's/\([[:alpha:]]\) -v/\1\\n -v/g')" +echo +echo "Environment in --env-file '${env_file}':" +while read -r line; do + echo " ${line}" +done < "${env_file}" +echo "================================================================================" + +set -x + +# shellcheck disable=SC2086 +# 
https://github.com/koalaman/shellcheck/wiki/SC2086 +eval ${CMD} diff --git a/tools/ci/checks/get_base_branch.sh b/tools/ci/checks/get_base_branch.sh index 61630d093f6..39d414d2817 100755 --- a/tools/ci/checks/get_base_branch.sh +++ b/tools/ci/checks/get_base_branch.sh @@ -1,52 +1,53 @@ -#!/bin/bash +#!/usr/bin/env bash set -e -current_branch="${CURRENT_GIT_BRANCH:-$( git symbolic-ref --short HEAD )}" - -base_branch="master" - -if [[ x"$current_branch" == x*"pull/"* ]]; then - # This is a PR and we need to determine the branch from the API. - pr_name="${current_branch/pull/pulls}" - - if [ -z ${GIT_REPO} ]; then - set -x - git_repo="$( git remote get-url origin )" - git_repo="$( echo "${git_repo}" | sed -e 's/.*github\.com[:\/]//g' )" - GIT_REPO="${git_repo//.git/}" - set +x - fi - - REPO_URL="https://api.github.com/repos/${GIT_REPO}/${pr_name}" - CURL_ARGS="--silent --retry 3" - if [ -n "${GITHUB_TOKEN}" ]; then - output="$( curl ${CURL_ARGS} --header "Authorization: token ${GITHUB_TOKEN}" "${REPO_URL}" )" +current_branch="${CURRENT_GIT_BRANCH:-$(git symbolic-ref --short HEAD)}" + +if [[ "${current_branch}" == *"pull/"* ]]; then + # This is a PR and we need to determine the branch from the API. + pr_name="${current_branch/pull/pulls}" + + if [ -z "${GIT_REPO}" ]; then + set -x + git_repo="$(git remote get-url origin)" + git_repo="$(echo "${git_repo}" | sed -e 's/.*github\.com[:\/]//g')" + GIT_REPO="${git_repo//.git/}" + set +x + fi + + REPO_URL="https://api.github.com/repos/${GIT_REPO}/${pr_name}" + CURL_ARGS="--silent --retry 3" + if [ -n "${GITHUB_TOKEN}" ]; then + output="$(curl ${CURL_ARGS} --header "Authorization: token ${GITHUB_TOKEN}" "${REPO_URL}")" + else + output="$(curl ${CURL_ARGS} "${REPO_URL}")" + fi + + # Note, curl does not return success/failure based on the HTTP code. + # Check for a valid return value by retrieving the ID. + pr_id="$(echo "$output" | jq -r .id)" + if [ x"$pr_id" == x"null" ]; then + # Check for a message. 
+ message="$(echo "$output" | jq -r .message)" + + if [ "${message}" == "Not Found" ]; then + echo "The specified PR (${git_repo}/${pr_name}) could not be found" + exit 1 else - output="$( curl ${CURL_ARGS} "${REPO_URL}" )" - fi - - # Note, curl does not return success/failure based on the HTTP code. - # Check for a valid return value by retrieving the ID. - pr_id="$( echo "$output" | jq -r .id )" - if [ x"$pr_id" == x"null" ]; then - # Check for a message - message="$( echo "$output" | jq -r .message )" - if [ x"$message" == x"Not Found" ]; then - echo "The specified PR (${git_repo}/${pr_name}) could not be found" - exit 1 - else - echo "The cURL output could not be parsed:" - echo "$output" - fi - exit 1 + echo "The cURL output could not be parsed:" + echo "${output}" fi + exit 1 + fi - base_branch="$( echo "$output" | jq -r .base.ref )" - current_branch="$( echo "$output" | jq -r .head.ref )" + base_branch="$(echo "${output}" | jq -r .base.ref)" + current_branch="$(echo "${output}" | jq -r .head.ref)" - # Fetch the base branch to ensure that it is available locally - git fetch origin ${base_branch}:${base_branch} + # Fetch the base branch to ensure that it is available locally. + git fetch origin "${base_branch}:${base_branch}" +else + base_branch="master" fi echo "${base_branch}" diff --git a/tools/ci/checks/run_pre_commit.sh b/tools/ci/checks/run_pre_commit.sh index bef6878f122..a3b9b7830fa 100755 --- a/tools/ci/checks/run_pre_commit.sh +++ b/tools/ci/checks/run_pre_commit.sh @@ -1,10 +1,34 @@ -#!/bin/bash +#!/usr/bin/env bash -DOCKER_TAG=${DOCKER_TAG:-latest} -DOCKER_IMAGE=${DOCKER_IMAGE:-mesosphere/dcos-commons:${DOCKER_TAG}} +set -euxo pipefail -docker run --rm -t \ - -v $(pwd):/build:ro \ - -w /build \ - ${DOCKER_IMAGE} \ - pre-commit run "$@" +SCRIPT_DIRECTORY="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" +DCOS_COMMONS_DIRECTORY="$(cd "${SCRIPT_DIRECTORY}/../../.." && pwd)" + +function was_run_from_submodule () { + ! 
(cd "${DCOS_COMMONS_DIRECTORY}" && test -d .git) +} + +if was_run_from_submodule; then + PROJECT_DIRECTORY="$(cd "${DCOS_COMMONS_DIRECTORY}/.." && pwd)" + PROJECT_ARGUMENTS="--project ${PROJECT_DIRECTORY}" +else + PROJECT_DIRECTORY="${DCOS_COMMONS_DIRECTORY}" + PROJECT_ARGUMENTS="" +fi + +DOCKER_TAG="${DOCKER_TAG:-latest}" +DOCKER_IMAGE="${DOCKER_IMAGE:-mesosphere/dcos-commons:${DOCKER_TAG}}" +# shellcheck disable=SC2124,SC2089 +# https://github.com/koalaman/shellcheck/wiki/SC2124 +# https://github.com/koalaman/shellcheck/wiki/SC2089 +DOCKER_COMMAND="bash -c \" + set -x; + pre-commit run --verbose ${*} +\"" + +export DOCKER_IMAGE +# shellcheck disable=SC2090 +export DOCKER_COMMAND + +exec "${DCOS_COMMONS_DIRECTORY}/run_container.sh" --headless ${PROJECT_ARGUMENTS} diff --git a/tools/ci/launch_cluster.sh b/tools/ci/launch_cluster.sh index 355cc8943e8..3151c6c0a86 100755 --- a/tools/ci/launch_cluster.sh +++ b/tools/ci/launch_cluster.sh @@ -7,6 +7,8 @@ set -e LAUNCH_SUCCESS="False" RETRY_LAUNCH="True" +env + while [ x"${LAUNCH_SUCCESS}" == x"False" ]; do rm -f ${CLUSTER_INFO_FILE} # dcos-launch complains if the file already exists /venvs/wrap.sh dcos-launch dcos-launch create --config-path=${LAUNCH_CONFIG_FILE} --info-path=${CLUSTER_INFO_FILE} @@ -15,7 +17,7 @@ while [ x"${LAUNCH_SUCCESS}" == x"False" ]; do else set -e fi - /venvs/wrap.sh dcos-launch dcos-launch wait --info-path=${CLUSTER_INFO_FILE} 2>&1 | tee dcos-launch-wait-output.stdout + /venvs/wrap.sh dcos-launch wait --info-path=${CLUSTER_INFO_FILE} 2>&1 | tee dcos-launch-wait-output.stdout # Grep exits with an exit code of 1 if no lines are matched. We thus need to # disable exit on errors. 
diff --git a/tools/ci/steps/check_python_files.sh b/tools/ci/steps/check_python_files.sh index 74850bf04c4..8e5ebdc30a4 100755 --- a/tools/ci/steps/check_python_files.sh +++ b/tools/ci/steps/check_python_files.sh @@ -1,27 +1,29 @@ -#!/bin/bash -# This script is used by the build system to check MODIFIED Python files in the repository. +#!/usr/bin/env bash + +# This script is used by the build system to check all Python files in the +# repository. # -# By default, the BASE_BRANCH is determined using the get_base_branch script, but this -# can be overridden by setting the BASE_BRANCH environment variable before invoking this -# script +# By default, the BASE_BRANCH is determined using the get_base_branch script, +# but this can be overridden by setting the BASE_BRANCH environment variable +# before invoking this script -TOOL_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )/../checks" && pwd )" +set -x -# Determine the target branch for the diff calculation -BASE_BRANCH=${BASE_BRANCH:-$( ${TOOL_DIR}/get_base_branch.sh )} +TOOL_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}" )/../checks" && pwd)" -# Get the list of changed .py files relative to the base branch -CHANGESET="$( ${TOOL_DIR}/get_applicable_changes.py --extensions ".py" --from-git "${BASE_BRANCH}" )" +# Determine the target branch for the diff calculation. +BASE_BRANCH="${BASE_BRANCH:-$("${TOOL_DIR}/get_base_branch.sh")}" -FAILURES= -if [[ -n ${CHANGESET} ]]; then +# Get the list of changed .py files relative to the base branch. +CHANGESET="$("${TOOL_DIR}/get_applicable_changes.py" --extensions ".py" --from-git "${BASE_BRANCH}")" - echo "Changeset:" - echo "${CHANGESET}" +if [[ -n ${CHANGESET} ]]; then + echo "Changeset:" + echo "${CHANGESET}" - ${TOOL_DIR}/run_pre_commit.sh --files ${CHANGESET} + exec "${TOOL_DIR}/run_pre_commit.sh" --files ${CHANGESET} - exit $? + exit $? else - echo "No Python files in changeset." + echo "No Python files in changeset." 
fi diff --git a/tools/ci/test_runner.sh b/tools/ci/test_runner.sh index 77ef2c6d1ee..1f746413cc9 100755 --- a/tools/ci/test_runner.sh +++ b/tools/ci/test_runner.sh @@ -10,7 +10,7 @@ export PACKAGE_REGISTRY_ENABLED export PACKAGE_REGISTRY_STUB_URL export DCOS_FILES_PATH -BUILD_TOOL_DIR="$( cd "$( dirname "${BASH_SOURCE[0]}" )" && pwd )" +BUILD_TOOL_DIR="$(cd "$(dirname "${BASH_SOURCE[0]}")" && pwd)" REPO_ROOT_DIR="${REPO_ROOT:-$1}" SINGLE_FRAMEWORK="True" @@ -75,7 +75,7 @@ function get_public_master_url() for attempt in $(seq 1 ${attempts}); do # Careful to not use a pipeline! if /venvs/wrap.sh dcos-launch dcos-launch describe --info-path="${REPO_ROOT_DIR}/cluster_info.json" > "${cluster_description_file}" && - master_ip=$(jq --raw-output --exit-status '.masters[0].public_ip' < "${cluster_description_file}") + master_ip=$(jq --raw-output --exit-status '.masters[0].public_ip' < "${cluster_description_file}") then echo "https://${master_ip}" return 0