diff --git a/Makefile.am b/Makefile.am
index e6b66ace..d50288d7 100644
--- a/Makefile.am
+++ b/Makefile.am
@@ -69,6 +69,7 @@ GENERATED_FILES = \
 	tests/metrics/network/network-metrics-memory-pss.sh \
 	tests/metrics/network/network-metrics-memory-rss-1g.sh \
 	tests/metrics/network/network-metrics-memory-pss-1g.sh \
+	tests/lib/send_results.sh \
 	tests/lib/test-common.bash
 
 $(GENERATED_FILES): %: %.in Makefile
diff --git a/tests/lib/send_results.sh.in b/tests/lib/send_results.sh.in
new file mode 100755
index 00000000..0aa852a4
--- /dev/null
+++ b/tests/lib/send_results.sh.in
@@ -0,0 +1,340 @@
+#!/bin/bash
+
+# This file is part of cc-oci-runtime.
+#
+# Copyright (C) 2017 Intel Corporation
+#
+# This program is free software; you can redistribute it and/or
+# modify it under the terms of the GNU General Public License
+# as published by the Free Software Foundation; either version 2
+# of the License, or (at your option) any later version.
+#
+# This program is distributed in the hope that it will be useful,
+# but WITHOUT ANY WARRANTY; without even the implied warranty of
+# MERCHANTABILITY or FITNESS FOR A PARTICULAR PURPOSE. See the
+# GNU General Public License for more details.
+#
+# You should have received a copy of the GNU General Public License
+# along with this program; if not, write to the Free Software
+# Foundation, Inc., 51 Franklin Street, Fifth Floor, Boston, MA 02110-1301, USA.
+#
+# This script will store the results into csv files in the
+# metrics 'results' directory.
+# The csv file names are derived from the reported 'test name', and all
+# results for a single 'test name' are collated (appended) into a single csv.
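+# For example (an illustrative name), results registered under the test name
+# "cor create time" would all be appended to the file "cor-create-time.csv".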
+
+# General env
+MYSELF="${0##*/}"
+SCRIPT_PATH=$(dirname "$(readlink -f "$0")")
+source "${SCRIPT_PATH}/test-common.bash"
+
+# Override the RESULT_DIR from test-common.bash for the moment.
+# Once all tests are migrated to use this script, remove the
+# RESULT_DIR definition from test-common.bash, as all results should
+# then be stored via this script.
+RESULT_DIR="${SCRIPT_PATH}/../metrics/results"
+
+GROUP="PNP"
+CC_DEFAULT_IMG_PATH="/usr/share/clear-containers"
+CONTAINERS_IMG_SYSTEM="@CONTAINERS_IMG@"
+CONTAINERS_IMG_GUESS="${CC_DEFAULT_IMG_PATH}/clear-containers.img"
+CONTAINER_KERNEL_SYSTEM="@CONTAINER_KERNEL@"
+CONTAINER_KERNEL_GUESS="${CC_DEFAULT_IMG_PATH}/vmlinux.container"
+SYSTEM_CLEAR_RELEASE_FILE="/usr/lib/os-release"
+SYSTEM_CENTOS_RELEASE_FILE="/etc/os-release"
+
+declare TEST_NAME
+declare RESULT
+declare ARGS
+declare UNITS
+declare SYS_VERSION
+declare SYSTEM
+declare TAG
+declare HW
+declare SEND
+declare KERNEL
+declare IMG
+
+# Report the platform name: the CPU model plus the core count
+function get_platform_name()
+{
+	com="core"
+
+	model="$(grep "model name" /proc/cpuinfo | uniq | \
+		awk -F ": " '{print $2}' | sed s/\(R\)//g | \
+		sed s/\(TM\)//g | sed s/[[:space:]]*CPU[[:space:]]*/" "/g | \
+		cut -d"@" -f1 | sed s/[[:space:]]*$//g)"
+
+	num_cores=$(nproc)
+
+	if (( num_cores > 1 )); then
+		com="cores"
+	fi
+
+	echo "$model ($num_cores $com)"
+}
+
+# Locate the cc-oci-runtime binary. If it cannot be found, return nothing.
+function locate_runtime()
+{
+	# For now, use the build installed path for the runtime.
+	# Ultimately, it would be good if we could ask/extract from
+	# docker itself where the runtime it is using lives.
+	if [ -f "${bindir}/cc-oci-runtime" ]; then
+		echo "${bindir}/cc-oci-runtime"
+	else
+		# And if we cannot find it at the default location, can we
+		# find it in the path?
+		runtime="$(which cc-oci-runtime)"
+		if [ -n "$runtime" ]; then
+			echo "$runtime"
+		fi
+	fi
+}
+
+function find_system_name()
+{
+	os="$(grep -w "ID" "$SYSTEM_RELEASE_FILE" | cut -d "=" -f2)"
+
+	if [ -z "$os" ]; then
+		os="Unknown"
+	fi
+
+	echo "$os"
+}
+
+function find_system_version()
+{
+	version="$(grep "VERSION_ID" "$SYSTEM_RELEASE_FILE" | cut -d "=" -f2 | sed s/\"//g)"
+
+	if [ -z "$version" ]; then
+		version="Unknown"
+	fi
+
+	echo "$version"
+}
+
+# Try to find the img file we are running with.
+function locate_containers_img()
+{
+	# This is the location the build/install is configured for
+	res="$CONTAINERS_IMG_SYSTEM"
+
+	if [ ! -f "$res" ]; then
+		# But sometimes we are running the tests on an already
+		# installed system - try a 'backup' path.
+		# This is far from perfect. Ideally we would ask the test
+		# runner, the test or docker which runtime, kernel and img
+		# (if appropriate for the runtime) were used.
+		res="$CONTAINERS_IMG_GUESS"
+
+		if [ ! -f "$res" ]; then
+			res=""
+		fi
+	fi
+
+	echo "$res"
+}
+
+# Try to find the VM container kernel that was used.
+function locate_container_kernel()
+{
+	# This is the location the build/install is configured for
+	res="$CONTAINER_KERNEL_SYSTEM"
+
+	if [ ! -f "$res" ]; then
+		# But sometimes we are running the tests on an already
+		# installed system - try a 'backup' path.
+		# This is far from perfect. Ideally we would ask the test
+		# runner, the test or docker which runtime, kernel and img
+		# (if appropriate for the runtime) were used.
+		res="$CONTAINER_KERNEL_GUESS"
+
+		if [ ! -f "$res" ]; then
+			res=""
+		fi
+	fi
+
+	echo "$res"
+}
+
+function save_to_csv()
+{
+	if [ -z "$TEST_NAME" ]; then
+		die "test name argument not supplied"
+	fi
+
+	if [ -z "$RESULT" ]; then
+		die "result argument not supplied"
+	fi
+
+	if [ -z "$UNITS" ]; then
+		die "units argument not supplied"
+	fi
+
+	if [ -z "$HW" ]; then
+		HW="$(get_platform_name)"
+	fi
+
+	if [ -z "$IMG" ]; then
+		CONTAINERS_IMG="$(locate_containers_img)"
+		IMG="$(readlink "$CONTAINERS_IMG")"
+	fi
+
+	if [ -z "$KERNEL" ]; then
+		CONTAINER_KERNEL="$(locate_container_kernel)"
+		KERNEL="$(readlink "$CONTAINER_KERNEL")"
+	fi
+
+	if [ -z "$TAG" ]; then
+		# This is somewhat imperfect. Ideally we would have knowledge
+		# passed in from the test itself about which runtime it used,
+		# so we could be certain to have the correct runtime and
+		# extract the correct commit id.
+		runtime="$(locate_runtime)"
+		if [ -f "$runtime" ]; then
+			TAG="$($runtime -v | tail -n1 | sed s/"commit: "//g)"
+		else
+			TAG="unknown"
+		fi
+	fi
+
+	if [ -z "$SYSTEM" ]; then
+		SYSTEM="$(find_system_name)"
+	fi
+
+	if [ -z "$SYS_VERSION" ]; then
+		SYS_VERSION="$(find_system_version)"
+	fi
+
+	if [ -z "$ARGS" ]; then
+		ARGS="none"
+	fi
+
+	# Generate the file name from the test name - replace spaces and
+	# path chars with hyphens.
+	CSV_FILE=${RESULT_DIR}/$(echo "${TEST_NAME}" | sed 's/[ \/]/-/g').csv
+
+	if [ ! -d "${RESULT_DIR}" ]; then
+		mkdir -p "${RESULT_DIR}"
+	fi
+
+	timestamp="$(date +%s)"
+
+	# If this is the first write to the file, start with the column header
+	if [ ! -f "${CSV_FILE}" ]; then
+		s0="Timestamp,Group,Name,Args,Result,Units,System,SystemVersion,Platform,Image,Kernel,Commit"
+		if [ -z "$SEND" ]; then
+			echo "${s0}" > "${CSV_FILE}"
+		else
+			echo "Would have done: echo ${s0} > ${CSV_FILE}"
+		fi
+	fi
+
+	# A bit horrid - but quote the values in the CSV in case one has an
+	# embedded comma.
+	s1="\"$timestamp\",\"$GROUP\",\"$TEST_NAME\",\"$ARGS\",\"$RESULT\",\"$UNITS\",\"$SYSTEM\",\"$SYS_VERSION\",\"$HW\",\"$IMG\",\"$KERNEL\",\"$TAG\""
+
+	if [ -z "$SEND" ]; then
+		echo "${s1}" >> "${CSV_FILE}"
+	else
+		echo "Would have done: echo ${s1} >> ${CSV_FILE}"
+	fi
+}
+
+function help()
+{
+	usage=$(cat << EOF
+Usage: $MYSELF [options]
+	Description:
+		This tool will save test results to csv files.
+	Options:
+		-a	Test arguments
+		-d	Dry-run (show what would be written, without writing it)
+		-g	Group (by default PNP)
+		-h	Show this help page
+		-i	Clear Containers image
+		-k	Clear Containers kernel image
+		-n	Test name (mandatory)
+		-o	OS version
+		-r	Test result (mandatory)
+		-s	Name of the OS
+		-t	Git commit
+		-u	Test units, for example: secs, ms, KB (mandatory)
+		-v	Show version
+		-w	Hardware/platform name
+EOF
+)
+	echo "$usage"
+}
+
+function main()
+{
+	local OPTIND
+	while getopts ":a:dg:hi:k:n:o:r:s:t:u:vw:" opt; do
+		case ${opt} in
+		a)
+			ARGS="${OPTARG}"
+			;;
+		d)
+			SEND="No"
+			;;
+		g)
+			GROUP="${OPTARG}"
+			;;
+		h)
+			help
+			exit 0
+			;;
+		i)
+			IMG="${OPTARG}"
+			;;
+		k)
+			KERNEL="${OPTARG}"
+			;;
+		n)
+			TEST_NAME="${OPTARG}"
+			;;
+		o)
+			SYS_VERSION="${OPTARG}"
+			;;
+		r)
+			RESULT="${OPTARG}"
+			;;
+		s)
+			SYSTEM="${OPTARG}"
+			;;
+		t)
+			TAG="${OPTARG}"
+			;;
+		u)
+			UNITS="${OPTARG}"
+			;;
+		v)
+			echo "$MYSELF version 0.1"
+			exit 0
+			;;
+		w)
+			HW="${OPTARG}"
+			;;
+		esac
+	done
+	shift $((OPTIND-1))
+
+	# Do some prep work to locate files.
+	# Try to figure out where the system release file is.
+	if [ -f "$SYSTEM_CLEAR_RELEASE_FILE" ]; then
+		SYSTEM_RELEASE_FILE="$SYSTEM_CLEAR_RELEASE_FILE"
+	fi
+
+	if [ -f "$SYSTEM_CENTOS_RELEASE_FILE" ]; then
+		SYSTEM_RELEASE_FILE="$SYSTEM_CENTOS_RELEASE_FILE"
+	fi
+
+	if [ -z "$SYSTEM_RELEASE_FILE" ]; then
+		die "Cannot locate system release file"
+	fi
+
+	save_to_csv
+}
+
+# call main
+main "$@"
diff --git a/tests/lib/test-common.bash.in b/tests/lib/test-common.bash.in
index 6e2a8416..eebc895e 100644
--- a/tests/lib/test-common.bash.in
+++ b/tests/lib/test-common.bash.in
@@ -22,6 +22,7 @@ DOCKER_EXE="docker"
 DOCKER_SERVICE="docker-cor"
 SCRIPT_PATH=$(dirname $(readlink -f $0))
 RESULT_DIR="${SCRIPT_PATH}/../results"
+LIB_DIR="${SCRIPT_PATH}/../../lib"
 HYPERVISOR_PATH="@QEMU_PATH@"
 CC_SHIM_PATH="/usr/libexec/cc-shim"
 number_of_attempts=5
@@ -63,6 +64,22 @@ function write_result_to_file(){
 	echo "$test_name,$test_args,$test_data,$test_platform,$os_ver" >> "$test_file"
 }
 
+# Save a test/metric result.
+# This is a wrapper around the send_results.sh script, which ultimately
+# decides where and in what format to store or process the data.
+# Arguments:
+#  Test name
+#  Test arguments
+#  Test result
+#  Test result unit of measurement
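+# Example (illustrative values):
+#  save_results "boot time" "image=clear-containers.img" "1.2" "Seconds"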
+function save_results(){
+	if [ $# != 4 ]; then
+		die "save_results() requires 4 parameters, not $#"
+	fi
+
+	bash "$LIB_DIR/send_results.sh" -n "$1" -a "$2" -r "$3" -u "$4"
+}
+
 function get_average(){
 	test_file=$1
 	count=0;
diff --git a/tests/metrics/README.md b/tests/metrics/README.md
index 646ee3e6..d0ee29e4 100644
--- a/tests/metrics/README.md
+++ b/tests/metrics/README.md
@@ -1,8 +1,16 @@
 # cc-oci-runtime metrics tests
+
+## Overview
+A number of metrics tests live in the [metrics](.) directory.
+These metrics can be utilised to provide information on aspects of the runtime,
+such as memory footprint and execution times.
+
+## Running the tests
+
 ### Prerequisites
 Some tests require you to enable the debug mode of cc-oci-runtime. To enable
 it, please take a look at:
-https://github.com/01org/cc-oci-runtime#18running-under-docker
+[Running under Docker](https://github.com/01org/cc-oci-runtime#18running-under-docker)
 
 ### Run all metrics tests
@@ -12,19 +20,60 @@ To run the metrics, please use the Makefile rule `metrics-tests` on the top level
 
 ```bash
 $ sudo -E PATH=$PATH make metrics-tests
 ```
 
-This will run all metrics tests and generete a `results` directory.
+This will run all metrics tests and place the results in the `tests/metrics/results`
+directory.
+
+Each file in the `results` directory contains the results for a test as comma
+separated values (CSV).
 
-Each file in the `results` directory contains the result for a test as a comma separated values or CSV.
-At the end of each file you will find the result average of all the data collected by the test.
+### Run a single metrics test
+Some metrics tests require certain files to be pre-processed before they can be
+executed. To pre-process these files, execute:
+
+```bash
+$ make metrics-tests
+```
+Note: this command will also try to execute all the tests - you may wish to halt
+execution once the tests start running, as all the pre-processed files will have
+been generated at that point.
 
-Execute `sudo -E PATH=$PATH make metrics tests` to generate the necessary files to run individual tests.
-For example:
+To then run an individual test, for example:
 
 ```bash
 $ cd tests/metrics
 $ bash workload_time/cor_create_time.sh
 ```
 
+Note: some tests require `root` privilege to execute correctly.
+
+## Internals
+This section covers some of the internal APIs and tools used by the metrics tests.
+
+### Results registration API
+The metrics tests register their results via the `save_results` function located in
+the common test library code. `save_results` takes four arguments:
+```
+save_results "Name" "TestParams" "Result" "Units"
+```
+
+| Argument   | Description                                                      |
+| ---------- | ---------------------------------------------------------------- |
+| Name       | The name of the test. This name is generally used to form a unique test identifier if the data is ultimately stored |
+| TestParams | Any arguments that were passed to the test that need recording   |
+| Result     | The result of the test (without unit tags)                       |
+| Units      | The units the Result argument is measured in (KB, Seconds etc.)  |
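+
+For example, the docker memory usage test in this repository registers its
+per-container memory footprint with:
+
+```bash
+save_results "Clear Containers Memory Usage" "rootfs=${IMAGE} units=kb" "$cc_mem_usage" "KB"
+```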
+
+The back end of the `save_results` function calls into the `send_results.sh` script
+found in the `tests/lib` directory. The default script provided collates results
+into csv files under the `tests/metrics/results` directory. The intention is that
+this script can be replaced by other scripts in order to store results in other
+storage back ends.
+
+`save_results` requires the `send_results.sh` script to support four arguments.
+These arguments correspond directly to those of the `save_results` function above:
+
+| Argument | Description                         |
+| -------- | ----------------------------------- |
+| -n       | The name of the test                |
+| -a       | The test parameters                 |
+| -r       | The test results                    |
+| -u       | The test result unit of measurement |
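+
+For example (illustrative values), the `save_results` call shown above is
+equivalent to the direct invocation:
+
+```bash
+$ bash tests/lib/send_results.sh -n "Clear Containers Memory Usage" \
+      -a "rootfs=clear-containers.img units=kb" -r "152404.52" -u "KB"
+```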
+
 ### Metrics tool
 The objective of `collect_mem_consmd.sh` tool is to measure the average memory
 consumption of any Clear Containers components under a determined stress test.
 This test launches a user defined number
diff --git a/tests/metrics/density/docker_memory_usage.sh.in b/tests/metrics/density/docker_memory_usage.sh.in
index 840ea21b..01f44ae9 100644
--- a/tests/metrics/density/docker_memory_usage.sh.in
+++ b/tests/metrics/density/docker_memory_usage.sh.in
@@ -40,7 +40,6 @@ CC_NUMBER="$1"
 WAIT_TIME="$2"
 TEST_NAME="Clear Containers Memory Usage"
 TEST_ARGS="rootfs=${IMAGE} units=kb"
-TEST_RESULT_FILE=$(echo "${RESULT_DIR}/${TEST_NAME}" | sed 's| |-|g')
 SMEM_BIN=$(command -v smem || true)
 QEMU_BIN="@QEMU_PATH@"
 PROXY_BIN="@libexecdir@/cc-proxy"
@@ -127,7 +126,7 @@ function get_docker_memory_usage(){
 	cc_proxy_mem="$(echo "scale=2; $proxy_mem / $CC_NUMBER" | bc -l)"
 	cc_mem_usage=$(echo "scale=2; $qemu_mem + $cc_shim_mem + $cc_proxy_mem" | bc -l)
 
-	write_result_to_file "$TEST_NAME" "$TEST_ARGS" "$cc_mem_usage" "$TEST_RESULT_FILE"
+	save_results "$TEST_NAME" "$TEST_ARGS" "$cc_mem_usage" "KB"
 
 	docker rm -f ${containers[@]}
 }
@@ -147,7 +146,4 @@ fi
 systemctl restart cc-proxy
 systemctl restart docker
 
-backup_old_file "$TEST_RESULT_FILE"
-write_csv_header "$TEST_RESULT_FILE"
 get_docker_memory_usage
-get_average "$TEST_RESULT_FILE"