From e86d9557721965bd76b8c52f241e161a14c6f4de Mon Sep 17 00:00:00 2001
From: Sam Van Oort
Date: Wed, 9 Mar 2016 09:26:05 -0500
Subject: [PATCH 01/14] WIP on refactor: common functionality for execution moved from resttest class

---
 pyresttest/macros.py        | 144 +++++++++++++++++++++++++++
 pyresttest/parsing.py       |  82 ++++++++++------
 pyresttest/resttest.py      | 189 +-----------------------------------
 pyresttest/test_macros.py   |  39 ++++++++
 pyresttest/test_parsing.py  |  44 +++++++--
 pyresttest/test_resttest.py |  36 -------
 setup.py                    |   2 +-
 7 files changed, 275 insertions(+), 261 deletions(-)
 create mode 100644 pyresttest/macros.py
 create mode 100644 pyresttest/test_macros.py

diff --git a/pyresttest/macros.py b/pyresttest/macros.py
new file mode 100644
index 00000000..f14e543c
--- /dev/null
+++ b/pyresttest/macros.py
@@ -0,0 +1,144 @@
+import sys
+from email import message_from_string # For headers handling
+
+from .generators import parse_generator
+from .parsing import *
+
+# Contains all the framework-general items for macros
+# This allows it to be separated from resttest.py
+# This way macros (test/benchmark/etc) can import shared methods
+# Without creating circular import loops
+
+# This is all our general execution framework stuff + HTTP request stuff
+
+DEFAULT_TIMEOUT = 10 # Seconds, FIXME remove from the tests class and move to here
+
+def resolve_option(name, object_self, test_config, cmdline_args):
+    """ Look for a specific field name in a set of objects
+        return value if found, return none if not found """
+    for i in (object_self, test_config, cmdline_args):
+        v = getattr(i, name, None)
+        if v is not None:
+            return v
+    return None
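Editorial aside (not part of the patch): resolve_option above is a simple precedence lookup -- the test object is consulted first, then the shared test configuration, then the command-line arguments -- and the first non-None attribute wins. A minimal usage sketch, assuming hypothetical objects mytest, my_config and my_args that expose settings as attributes:

    # Hypothetical sketch only: pick the effective timeout, falling back to the default
    timeout = resolve_option('timeout', mytest, my_config, my_args)
    if timeout is None:
        timeout = DEFAULT_TIMEOUT

Note the lookup is attribute-based (getattr), so a plain dict such as the parsed command-line arguments would need to be wrapped or adapted before being passed in.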
+
+class TestConfig:
+    """ Configuration for a test run """
+    timeout = DEFAULT_TIMEOUT # timeout of tests, in seconds
+    print_bodies = False # Print response bodies in all cases
+    print_headers = False # Print response headers in all cases
+    retries = 0 # Retries on failures
+    test_parallel = False # Allow parallel execution of tests in a test set, for speed?
+    interactive = False
+    verbose = False
+    ssl_insecure = False
+    skip_term_colors = False # Turn off output term colors
+
+    # Binding and creation of generators
+    variable_binds = None
+    generators = None # Map of generator name to generator function
+
+    def __str__(self):
+        return json.dumps(self, default=safe_to_json)
+
+class TestSet:
+    """ Encapsulates a set of tests and test configuration for them """
+    tests = list()
+    benchmarks = list()
+    config = TestConfig()
+
+    def __init__(self):
+        self.config = TestConfig()
+        self.tests = list()
+        self.benchmarks = list()
+
+    def __str__(self):
+        return json.dumps(self, default=safe_to_json)
+
+
+class BenchmarkResult:
+    """ Stores results from a benchmark for reporting use """
+    group = None
+    name = u'unnamed'
+
+    results = dict() # Benchmark output, map the metric to the result array for that metric
+    aggregates = list() # List of aggregates, as tuples of (metricname, aggregate, result)
+    failures = 0 # Track call count that failed
+
+    def __init__(self):
+        self.aggregates = list()
+        self.results = list()
+
+    def __str__(self):
+        return json.dumps(self, default=safe_to_json)
+
+
+class TestResponse:
+    """ Encapsulates everything about a test response """
+    test = None # Test run
+    response_code = None
+
+    body = None # Response body, if tracked
+
+    passed = False
+    response_headers = None
+    failures = None
+
+    def __init__(self):
+        self.failures = list()
+
+    def __str__(self):
+        return json.dumps(self, default=safe_to_json)
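A second editorial sketch (again, not patch content) of how these holder classes and the parsing helpers fit together: parse_configuration, defined just below, turns a parsed YAML 'config' node into a TestConfig, which is attached to a TestSet alongside the parsed tests. The input dictionary here is hypothetical:

    # Hypothetical sketch only: build a TestConfig from a config node and attach it
    testset = TestSet()
    testset.config = parse_configuration({'timeout': 30, 'print_bodies': 'true'})
    assert testset.config.timeout == 30
    assert testset.config.print_bodies is True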
+
+def parse_headers(header_string):
+    """ Parse a header-string into individual headers
+    Implementation based on: http://stackoverflow.com/a/5955949/95122
+    Note that headers are a list of (key, value) since duplicate headers are allowed
+
+    NEW NOTE: keys & values are unicode strings, but can only contain ISO-8859-1 characters
+    """
+    # First line is request line, strip it out
+    if not header_string:
+        return list()
+    request, headers = header_string.split('\r\n', 1)
+    if not headers:
+        return list()
+
+    # Python 2.6 message header parsing fails for Unicode strings, 2.7 is fine. Go figure.
+    if sys.version_info < (2,7):
+        header_msg = message_from_string(headers.encode(HEADER_ENCODING))
+        return [(text_type(k.lower(), HEADER_ENCODING), text_type(v, HEADER_ENCODING))
+                for k, v in header_msg.items()]
+    else:
+        header_msg = message_from_string(headers)
+        # Note: HTTP headers are *case-insensitive* per RFC 2616
+        return [(k.lower(), v) for k, v in header_msg.items()]
+
+def parse_configuration(node, base_config=None):
+    """ Parse input config to configuration information """
+    test_config = base_config
+    if not test_config:
+        test_config = TestConfig()
+
+    node = lowercase_keys(flatten_dictionaries(node)) # Make it usable
+
+    for key, value in node.items():
+        if key == u'timeout':
+            test_config.timeout = int(value)
+        elif key == u'print_bodies':
+            test_config.print_bodies = safe_to_bool(value)
+        elif key == u'retries':
+            test_config.retries = int(value)
+        elif key == u'variable_binds':
+            if not test_config.variable_binds:
+                test_config.variable_binds = dict()
+            test_config.variable_binds.update(flatten_dictionaries(value))
+        elif key == u'generators':
+            flat = flatten_dictionaries(value)
+            gen_map = dict()
+            for generator_name, generator_config in flat.items():
+                gen = parse_generator(generator_config)
+                gen_map[str(generator_name)] = gen
+            test_config.generators = gen_map
+
+    return test_config
\ No newline at end of file
diff --git a/pyresttest/parsing.py b/pyresttest/parsing.py
index 9e20f29d..0e30766f 100644
--- a/pyresttest/parsing.py
+++ b/pyresttest/parsing.py
@@ -1,7 +1,8 @@
 from __future__ import absolute_import
 import sys
 import string
-
+import os
+from optparse import OptionParser
 
 # Python 3 compatibility shims
 from . import six
@@ -100,35 +101,52 @@ def safe_to_bool(input):
     raise TypeError(
         'Input Object is not a boolean or string form of boolean!')
 
-
-class SuperConfigurator(object):
-    """ It's a bird! It's a plane! No, it's....
- The solution to handling horribly nasty, thorny configuration handling methods - - """ - - def run_configure(self, key, value, configurable, validator_func=None, converter_func=None, store_func=None, *args, **kwargs): - """ Run a single configuration element - Run a validator on the value, if supplied - Run a converter_funct to turn the value into something to storeable: - converter_func takes params (value) at least and throws exception if failed - If a store_func is supplied, use that to store the option - store_func needs to take params (object, key, value, args, kwargs) - If store_func NOT supplied we do a setattr on object - """ - if validator_func and not validator(value): - raise TypeError("Illegal argument for {0}".format(value)) - storeable = value - if converter_func: - storeable = converter_func(value) - if store_func: - store_func(configurable, key, storeable) +def parse_command_line_args(args_in): + """ Runs everything needed to execute from the command line, so main method is callable without arg parsing """ + parser = OptionParser( + usage="usage: %prog base_url test_filename.yaml [options] ") + parser.add_option(u"--print-bodies", help="Print all response bodies", + action="store", type="string", dest="print_bodies") + parser.add_option(u"--print-headers", help="Print all response headers", + action="store", type="string", dest="print_headers") + parser.add_option(u"--log", help="Logging level", + action="store", type="string") + parser.add_option(u"--interactive", help="Interactive mode", + action="store", type="string") + parser.add_option( + u"--url", help="Base URL to run tests against", action="store", type="string") + parser.add_option(u"--test", help="Test file to use", + action="store", type="string") + parser.add_option(u'--import_extensions', + help='Extensions to import, separated by semicolons', action="store", type="string") + parser.add_option( + u'--vars', help='Variables to set, as a YAML dictionary', action="store", type="string") + parser.add_option(u'--verbose', help='Put cURL into verbose mode for extra debugging power', + action='store_true', default=False, dest="verbose") + parser.add_option(u'--ssl-insecure', help='Disable cURL host and peer cert verification', + action='store_true', default=False, dest="ssl_insecure") + parser.add_option(u'--absolute-urls', help='Enable absolute URLs in tests instead of relative paths', + action="store_true", dest="absolute_urls") + parser.add_option(u'--skip_term_colors', help='Turn off the output term colors', + action='store_true', default=False, dest="skip_term_colors") + + (args, unparsed_args) = parser.parse_args(args_in) + args = vars(args) + + # Handle url/test as named, or, failing that, positional arguments + if not args['url'] or not args['test']: + if len(unparsed_args) == 2: + args[u'url'] = unparsed_args[0] + args[u'test'] = unparsed_args[1] + elif len(unparsed_args) == 1 and args['url']: + args['test'] = unparsed_args[0] + elif len(unparsed_args) == 1 and args['test']: + args['url'] = unparsed_args[0] else: - configurable.setattr(configurable, key, value) - - def configure(self, configs, configurable, handler, *args, **kwargs): - """ Use the configs and configurable to parse""" - for key, value in configs.items(): - # Read handler arguments and use them to call the configurator - handler[key] = config_options - self.run_configure(value, configurable) + parser.print_help() + parser.error( + "wrong number of arguments, need both url and test filename, either as 1st and 2nd parameters or via --url and 
--test") + + # So modules can be loaded from current folder + args['cwd'] = os.path.realpath(os.path.abspath(os.getcwd())) + return args \ No newline at end of file diff --git a/pyresttest/resttest.py b/pyresttest/resttest.py index 7bfb33c1..ef19725a 100644 --- a/pyresttest/resttest.py +++ b/pyresttest/resttest.py @@ -8,8 +8,6 @@ import json import csv import logging -from optparse import OptionParser -from email import message_from_string # For headers handling import time try: @@ -36,12 +34,12 @@ from pyresttest import generators from pyresttest import validators from pyresttest import tests - from pyresttest.generators import parse_generator - from pyresttest.parsing import flatten_dictionaries, lowercase_keys, safe_to_bool, safe_to_json + from pyresttest.parsing import * from pyresttest.validators import Failure from pyresttest.tests import Test, DEFAULT_TIMEOUT from pyresttest.benchmarks import Benchmark, AGGREGATES, METRICS, parse_benchmark + from pyresttest.macros import * else: # Normal imports from . import six from .six import text_type @@ -50,15 +48,16 @@ from . import binding from .binding import Context from . import generators - from .generators import parse_generator from . import parsing - from .parsing import flatten_dictionaries, lowercase_keys, safe_to_bool, safe_to_json + from .parsing import * from . import validators from .validators import Failure from . import tests from .tests import Test, DEFAULT_TIMEOUT from . import benchmarks from .benchmarks import Benchmark, AGGREGATES, METRICS, parse_benchmark + from . import macros + from .macros import * """ Executable class, ties everything together into the framework. @@ -96,77 +95,6 @@ def __exit__(self, etype, value, traceback): if self.newPath: # Don't CD to nothingness os.chdir(self.savedPath) - -class TestConfig: - """ Configuration for a test run """ - timeout = DEFAULT_TIMEOUT # timeout of tests, in seconds - print_bodies = False # Print response bodies in all cases - print_headers = False # Print response bodies in all cases - retries = 0 # Retries on failures - test_parallel = False # Allow parallel execution of tests in a test set, for speed? 
- interactive = False - verbose = False - ssl_insecure = False - skip_term_colors = False # Turn off output term colors - - # Binding and creation of generators - variable_binds = None - generators = None # Map of generator name to generator function - - def __str__(self): - return json.dumps(self, default=safe_to_json) - - -class TestSet: - """ Encapsulates a set of tests and test configuration for them """ - tests = list() - benchmarks = list() - config = TestConfig() - - def __init__(self): - self.config = TestConfig() - self.tests = list() - self.benchmarks = list() - - def __str__(self): - return json.dumps(self, default=safe_to_json) - - -class BenchmarkResult: - """ Stores results from a benchmark for reporting use """ - group = None - name = u'unnamed' - - results = dict() # Benchmark output, map the metric to the result array for that metric - aggregates = list() # List of aggregates, as tuples of (metricname, aggregate, result) - failures = 0 # Track call count that failed - - def __init__(self): - self.aggregates = list() - self.results = list() - - def __str__(self): - return json.dumps(self, default=safe_to_json) - - -class TestResponse: - """ Encapsulates everything about a test response """ - test = None # Test run - response_code = None - - body = None # Response body, if tracked - - passed = False - response_headers = None - failures = None - - def __init__(self): - self.failures = list() - - def __str__(self): - return json.dumps(self, default=safe_to_json) - - def read_test_file(path): """ Read test file at 'path' in YAML """ # TODO allow use of safe_load_all to handle multiple test sets in a given @@ -175,31 +103,6 @@ def read_test_file(path): return teststruct -def parse_headers(header_string): - """ Parse a header-string into individual headers - Implementation based on: http://stackoverflow.com/a/5955949/95122 - Note that headers are a list of (key, value) since duplicate headers are allowed - - NEW NOTE: keys & values are unicode strings, but can only contain ISO-8859-1 characters - """ - # First line is request line, strip it out - if not header_string: - return list() - request, headers = header_string.split('\r\n', 1) - if not headers: - return list() - - # Python 2.6 message header parsing fails for Unicode strings, 2.7 is fine. Go figure. 
- if sys.version_info < (2,7): - header_msg = message_from_string(headers.encode(HEADER_ENCODING)) - return [(text_type(k.lower(), HEADER_ENCODING), text_type(v, HEADER_ENCODING)) - for k, v in header_msg.items()] - else: - header_msg = message_from_string(headers) - # Note: HTTP headers are *case-insensitive* per RFC 2616 - return [(k.lower(), v) for k, v in header_msg.items()] - - def parse_testsets(base_url, test_structure, test_files=set(), working_directory=None, vars=None): """ Convert a Python data structure read from validated YAML to a set of structured testsets The data structure is assumed to be a list of dictionaries, each of which describes: @@ -264,37 +167,6 @@ def parse_testsets(base_url, test_structure, test_files=set(), working_directory testsets.append(testset) return testsets - -def parse_configuration(node, base_config=None): - """ Parse input config to configuration information """ - test_config = base_config - if not test_config: - test_config = TestConfig() - - node = lowercase_keys(flatten_dictionaries(node)) # Make it usable - - for key, value in node.items(): - if key == u'timeout': - test_config.timeout = int(value) - elif key == u'print_bodies': - test_config.print_bodies = safe_to_bool(value) - elif key == u'retries': - test_config.retries = int(value) - elif key == u'variable_binds': - if not test_config.variable_binds: - test_config.variable_binds = dict() - test_config.variable_binds.update(flatten_dictionaries(value)) - elif key == u'generators': - flat = flatten_dictionaries(value) - gen_map = dict() - for generator_name, generator_config in flat.items(): - gen = parse_generator(generator_config) - gen_map[str(generator_name)] = gen - test_config.generators = gen_map - - return test_config - - def read_file(path): """ Read an input into a file, doing necessary conversions around relative path handling """ with open(path, "r") as f: @@ -855,57 +727,6 @@ def main(args): sys.exit(failures) -def parse_command_line_args(args_in): - """ Runs everything needed to execute from the command line, so main method is callable without arg parsing """ - parser = OptionParser( - usage="usage: %prog base_url test_filename.yaml [options] ") - parser.add_option(u"--print-bodies", help="Print all response bodies", - action="store", type="string", dest="print_bodies") - parser.add_option(u"--print-headers", help="Print all response headers", - action="store", type="string", dest="print_headers") - parser.add_option(u"--log", help="Logging level", - action="store", type="string") - parser.add_option(u"--interactive", help="Interactive mode", - action="store", type="string") - parser.add_option( - u"--url", help="Base URL to run tests against", action="store", type="string") - parser.add_option(u"--test", help="Test file to use", - action="store", type="string") - parser.add_option(u'--import_extensions', - help='Extensions to import, separated by semicolons', action="store", type="string") - parser.add_option( - u'--vars', help='Variables to set, as a YAML dictionary', action="store", type="string") - parser.add_option(u'--verbose', help='Put cURL into verbose mode for extra debugging power', - action='store_true', default=False, dest="verbose") - parser.add_option(u'--ssl-insecure', help='Disable cURL host and peer cert verification', - action='store_true', default=False, dest="ssl_insecure") - parser.add_option(u'--absolute-urls', help='Enable absolute URLs in tests instead of relative paths', - action="store_true", dest="absolute_urls") - 
parser.add_option(u'--skip_term_colors', help='Turn off the output term colors', - action='store_true', default=False, dest="skip_term_colors") - - (args, unparsed_args) = parser.parse_args(args_in) - args = vars(args) - - # Handle url/test as named, or, failing that, positional arguments - if not args['url'] or not args['test']: - if len(unparsed_args) == 2: - args[u'url'] = unparsed_args[0] - args[u'test'] = unparsed_args[1] - elif len(unparsed_args) == 1 and args['url']: - args['test'] = unparsed_args[0] - elif len(unparsed_args) == 1 and args['test']: - args['url'] = unparsed_args[0] - else: - parser.print_help() - parser.error( - "wrong number of arguments, need both url and test filename, either as 1st and 2nd parameters or via --url and --test") - - # So modules can be loaded from current folder - args['cwd'] = os.path.realpath(os.path.abspath(os.getcwd())) - return args - - def command_line_run(args_in): args = parse_command_line_args(args_in) main(args) diff --git a/pyresttest/test_macros.py b/pyresttest/test_macros.py new file mode 100644 index 00000000..ecf8790c --- /dev/null +++ b/pyresttest/test_macros.py @@ -0,0 +1,39 @@ +import unittest + +from . import macros +from .macros import * + +class TestMacros(unittest.TestCase): + def test_parse_headers(self): + """ Basic header parsing tests """ + headerstring = u'HTTP/1.1 200 OK\r\nDate: Mon, 29 Dec 2014 02:42:33 GMT\r\nExpires: -1\r\nCache-Control: private, max-age=0\r\nContent-Type: text/html; charset=ISO-8859-1\r\nX-XSS-Protection: 1; mode=block\r\nX-Frame-Options: SAMEORIGIN\r\nAlternate-Protocol: 80:quic,p=0.02\r\nTransfer-Encoding: chunked\r\n\r\n' + header_list = parse_headers(headerstring) + header_dict = dict(header_list) + + self.assertTrue(isinstance(header_list, list)) + self.assertEqual('-1', header_dict['expires']) + self.assertEqual('private, max-age=0', header_dict['cache-control']) + self.assertEqual(8, len(header_dict)) + + # Error cases + # No headers + result = parse_headers("") # Shouldn't throw exception + self.assertTrue(isinstance(result, list)) + self.assertEqual(0, len(result)) + + # Just the HTTP prefix + result = parse_headers( + 'HTTP/1.1 200 OK\r\n\r\n') # Shouldn't throw exception + self.assertTrue(isinstance(result, list)) + self.assertEqual(0, len(result)) + + def test_parse_headers_multiples(self): + """ Test headers where there are duplicate values set """ + headerstring = u'HTTP/1.1 200 OK\r\nDate: Mon, 29 Dec 2014 02:42:33 GMT\r\nAccept: text/html\r\nAccept: application/json\r\n\r\n' + headers = parse_headers(headerstring) + + self.assertTrue(isinstance(headers, list)) + self.assertEqual(3, len(headers)) + self.assertEqual(('date', 'Mon, 29 Dec 2014 02:42:33 GMT'), headers[0]) + self.assertEqual(('accept', 'text/html'), headers[1]) + self.assertEqual(('accept', 'application/json'), headers[2]) diff --git a/pyresttest/test_parsing.py b/pyresttest/test_parsing.py index 108ab870..6a928fef 100644 --- a/pyresttest/test_parsing.py +++ b/pyresttest/test_parsing.py @@ -127,13 +127,41 @@ def __init__(self): self.assertEqual({'newval': 'cherries'}, safe_to_json(Special())) - def test_run_configure(self): - """ Test the configure function use """ - converter = safe_to_bool - pass - - def test_configure(self): - """ Do stuff here """ - pass + def test_cmdline_args_parsing_basic(self): + cmdline = [ + 'my_url', 'my_test_filename', + '--print-bodies', 'True' + ] + args = parse_command_line_args(cmdline) + self.assertEqual('my_url', args['url']) + self.assertEqual('my_test_filename', args['test']) + 
self.assertEqual('True', args['print_bodies']) + + def test_cmdline_args_parsing_positional(self): + """ Tests cases where test and url are from named arguments, not positional """ + + cmdline = [ + '--url', 'my_url', + '--test', 'my_test_filename', + ] + + args = parse_command_line_args(cmdline) + self.assertEqual('my_url', args['url']) + self.assertEqual('my_test_filename', args['test']) + + # url from position arg, test as named arg + del cmdline[0] + args = parse_command_line_args(cmdline) + self.assertEqual('my_url', args['url']) + self.assertEqual('my_test_filename', args['test']) + + cmdline = [ + '--url', 'my_url', + 'my_test_filename', + ] + args = parse_command_line_args(cmdline) + self.assertEqual('my_url', args['url']) + self.assertEqual('my_test_filename', args['test']) + if __name__ == '__main__': unittest.main() diff --git a/pyresttest/test_resttest.py b/pyresttest/test_resttest.py index ffcdd936..8b2c7e27 100644 --- a/pyresttest/test_resttest.py +++ b/pyresttest/test_resttest.py @@ -109,41 +109,5 @@ def test_jmespath_import(self): self.assertTrue('jmespath' in validators.EXTRACTORS) jmespathext = validators.EXTRACTORS['jmespath']('test1.a') - def test_cmdline_args_parsing_basic(self): - cmdline = [ - 'my_url', 'my_test_filename', - '--print-bodies', 'True' - ] - args = parse_command_line_args(cmdline) - self.assertEqual('my_url', args['url']) - self.assertEqual('my_test_filename', args['test']) - self.assertEqual('True', args['print_bodies']) - - def test_cmdline_args_parsing_positional(self): - """ Tests cases where test and url are from named arguments, not positional """ - - cmdline = [ - '--url', 'my_url', - '--test', 'my_test_filename', - ] - - args = parse_command_line_args(cmdline) - self.assertEqual('my_url', args['url']) - self.assertEqual('my_test_filename', args['test']) - - # url from position arg, test as named arg - del cmdline[0] - args = parse_command_line_args(cmdline) - self.assertEqual('my_url', args['url']) - self.assertEqual('my_test_filename', args['test']) - - cmdline = [ - '--url', 'my_url', - 'my_test_filename', - ] - args = parse_command_line_args(cmdline) - self.assertEqual('my_url', args['url']) - self.assertEqual('my_test_filename', args['test']) - if __name__ == '__main__': unittest.main() diff --git a/setup.py b/setup.py index 6224c018..c090d4c4 100755 --- a/setup.py +++ b/setup.py @@ -36,7 +36,7 @@ ], py_modules=['pyresttest.resttest', 'pyresttest.generators', 'pyresttest.binding', 'pyresttest.parsing', 'pyresttest.validators', 'pyresttest.contenthandling', - 'pyresttest.benchmarks', 'pyresttest.tests', + 'pyresttest.benchmarks', 'pyresttest.tests', 'pyresttest.macros', 'pyresttest.six', 'pyresttest.ext.validator_jsonschema', 'pyresttest.ext.extractor_jmespath'], From ad80e178b3f3de0c36e21b04eb105e39fbd6c87a Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Thu, 10 Mar 2016 18:52:15 -0500 Subject: [PATCH 02/14] First rough hack at moving the test run method into test class --- pyresttest/functionaltest.py | 40 +++++----- pyresttest/macros.py | 18 ++++- pyresttest/resttest.py | 143 +++-------------------------------- pyresttest/tests.py | 140 +++++++++++++++++++++++++++++++++- 4 files changed, 182 insertions(+), 159 deletions(-) diff --git a/pyresttest/functionaltest.py b/pyresttest/functionaltest.py index d05899cf..1608a0ba 100644 --- a/pyresttest/functionaltest.py +++ b/pyresttest/functionaltest.py @@ -54,7 +54,7 @@ def test_get(self): """ Basic local get test """ test = Test() test.url = self.prefix + '/api/person/' - test_response = 
resttest.run_test(test) + test_response = test.execute_macro() self.assertTrue(test_response.passed) self.assertEqual(200, test_response.response_code) @@ -62,7 +62,7 @@ def test_head(self): """ Calls github API to test the HEAD method, ugly but Django tastypie won't support it """ test = Test() test.url = 'https://api.github.com/users/svanoort' - test_response = resttest.run_test(test) + test_response = test.execute_macro() self.assertTrue(test_response.passed) self.assertEqual(200, test_response.response_code) print("Github API response headers: \n{0}".format(test_response.response_headers)) @@ -77,7 +77,7 @@ def test_patch(self): test.headers = {u'Content-Type': u'application/json', u'X-HTTP-Method-Override': u'PATCH'} test.expected_status = [202] # Django returns 202 - test_response = resttest.run_test(test) + test_response = test.execute_macro() self.assertTrue(test_response.passed) #self.assertEqual(202, test_response.response_code) @@ -86,7 +86,7 @@ def test_get_redirect(self): test = Test() test.curl_options = {'FOLLOWLOCATION': True} test.url = self.prefix + '/api/person' - test_response = resttest.run_test(test) + test_response = test.execute_macro() self.assertTrue(test_response.passed) self.assertEqual(200, test_response.response_code) @@ -115,7 +115,7 @@ def test_get_validators(self): test.validators.append( validators.parse_validator('compare', cfg_compare_id)) - test_response = resttest.run_test(test) + test_response = test.execute_macro() for failure in test_response.failures: print("REAL FAILURE") print("Test Failure, failure type: {0}, Reason: {1}".format( @@ -140,7 +140,7 @@ def test_get_validators_fail(self): 'expected': 'NotJenkins'} test.validators.append( validators.parse_validator('compare', cfg_compare)) - test_response = resttest.run_test(test) + test_response = test.execute_macro() self.assertFalse(test_response.passed) self.assertTrue(test_response.failures) self.assertEqual(3, len(test_response.failures)) @@ -148,7 +148,7 @@ def test_get_validators_fail(self): def test_detailed_get(self): test = Test() test.url = self.prefix + '/api/person/1/' - test_response = resttest.run_test(test) + test_response = test.execute_macro() self.assertEqual(True, test_response.passed) self.assertEqual(200, test_response.response_code) @@ -164,7 +164,7 @@ def test_header_extraction(self): key2: validators.HeaderExtractor.parse('sErVer') } my_context = Context() - test_response = resttest.run_test(test, context=my_context) + test_response = test.execute_macro(context=my_context) val1 = my_context.get_value(key1) val2 = my_context.get_value(key2) self.assertEqual(val1, val2) @@ -182,7 +182,7 @@ def test_header_validators(self): test.validators = list() test.validators.append( validators.parse_validator('comparator', config)) - result = resttest.run_test(test) + result = test.execute_macro() if result.failures: for fail in result.failures: @@ -193,7 +193,7 @@ def test_failed_get(self): """ Test GET that should fail """ test = Test() test.url = self.prefix + '/api/person/500/' - test_response = resttest.run_test(test) + test_response = test.execute_macro() self.assertEqual(False, test_response.passed) self.assertEqual(404, test_response.response_code) @@ -204,7 +204,7 @@ def test_put_inplace(self): test.method = u'PUT' test.body = '{"first_name": "Gaius","id": 1,"last_name": "Baltar","login": "gbaltar"}' test.headers = {u'Content-Type': u'application/json'} - test_response = resttest.run_test(test) + test_response = test.execute_macro() self.assertEqual(True, test_response.passed) 
self.assertEqual(200, test_response.response_code) @@ -216,14 +216,14 @@ def test_put_created(self): test.expected_status = [200, 201, 204] test.body = '{"first_name": "Willim","last_name": "Adama","login":"theadmiral", "id": 100}' test.headers = {u'Content-Type': u'application/json'} - test_response = resttest.run_test(test) + test_response = test.execute_macro() self.assertEqual(True, test_response.passed) self.assertEqual(201, test_response.response_code) # Test it was actually created test2 = Test() test2.url = test.url - test_response2 = resttest.run_test(test2) + test_response2 = test2.execute_macro() self.assertTrue(test_response2.passed) self.assertTrue( u'"last_name": "Adama"' in test_response2.body.decode('UTF-8')) @@ -238,14 +238,14 @@ def test_post(self): test.expected_status = [200, 201, 204] test.body = '{"first_name": "Willim","last_name": "Adama","login": "theadmiral"}' test.headers = {u'Content-Type': u'application/json'} - test_response = resttest.run_test(test) + test_response = test.execute_macro() self.assertEqual(True, test_response.passed) self.assertEqual(201, test_response.response_code) # Test user was created test2 = Test() test2.url = self.prefix + '/api/person/?login=theadmiral' - test_response2 = resttest.run_test(test2) + test_response2 = test2.execute_macro() self.assertTrue(test_response2.passed) # Test JSON load/dump round trip on body @@ -261,21 +261,21 @@ def test_delete(self): test.url = self.prefix + '/api/person/1/' test.expected_status = [200, 202, 204] test.method = u'DELETE' - test_response = resttest.run_test(test) + test_response = test.execute_macro() self.assertEqual(True, test_response.passed) self.assertEqual(204, test_response.response_code) # Verify it's really gone test.method = u'GET' test.expected_status = [404] - test_response = resttest.run_test(test) + test_response = test.execute_macro() self.assertEqual(True, test_response.passed) self.assertEqual(404, test_response.response_code) # Check it's gone by name test2 = Test() test2.url = self.prefix + '/api/person/?first_name__contains=Gaius' - test_response2 = resttest.run_test(test2) + test_response2 = test2.execute_macro() self.assertTrue(test_response2.passed) self.assertTrue(u'"objects": []' in test_response2.body.decode('UTF-8')) @@ -338,7 +338,7 @@ def test_get_validators_jmespath_fail(self): 'expected': 'NotJenkins'} test.validators.append( validators.parse_validator('compare', cfg_compare)) - test_response = resttest.run_test(test) + test_response = test.execute_macro() self.assertFalse(test_response.passed) self.assertTrue(test_response.failures) self.assertEqual(3, len(test_response.failures)) @@ -368,7 +368,7 @@ def test_get_validators_jmespath(self): test.validators.append( validators.parse_validator('compare', cfg_compare_id)) - test_response = resttest.run_test(test) + test_response = test.execute_macro() for failure in test_response.failures: print("REAL FAILURE") print("Test Failure, failure type: {0}, Reason: {1}".format( diff --git a/pyresttest/macros.py b/pyresttest/macros.py index f14e543c..40c8b7db 100644 --- a/pyresttest/macros.py +++ b/pyresttest/macros.py @@ -11,7 +11,9 @@ # This is all our general execution framework stuff + HTTP request stuff +ESCAPE_DECODING = 'string-escape' DEFAULT_TIMEOUT = 10 # Seconds, FIXME remove from the tests class and move to here +HEADER_ENCODING ='ISO-8859-1' # Per RFC 2616 def resolve_option(name, object_self, test_config, cmdline_args): """ Look for a specific field name in a set of objects @@ -22,7 +24,15 @@ def 
resolve_option(name, object_self, test_config, cmdline_args): return v return None -class TestConfig: +class MacroCallbacks(object): + """ Callbacks bundle to handle reporting """ + def start_macro(self, input): lambda x: None + def end_macro(self, input): lambda x: None + def pre_request(self, input): lambda x: None + def post_request(self, input): lambda x: None + def log_intermediate(self, input): lambda x: None + +class TestConfig(object): """ Configuration for a test run """ timeout = DEFAULT_TIMEOUT # timeout of tests, in seconds print_bodies = False # Print response bodies in all cases @@ -41,7 +51,7 @@ class TestConfig: def __str__(self): return json.dumps(self, default=safe_to_json) -class TestSet: +class TestSet(object): """ Encapsulates a set of tests and test configuration for them """ tests = list() benchmarks = list() @@ -56,7 +66,7 @@ def __str__(self): return json.dumps(self, default=safe_to_json) -class BenchmarkResult: +class BenchmarkResult(object): """ Stores results from a benchmark for reporting use """ group = None name = u'unnamed' @@ -73,7 +83,7 @@ def __str__(self): return json.dumps(self, default=safe_to_json) -class TestResponse: +class TestResponse(object): """ Encapsulates everything about a test response """ test = None # Test run response_code = None diff --git a/pyresttest/resttest.py b/pyresttest/resttest.py index ef19725a..1635e3f7 100644 --- a/pyresttest/resttest.py +++ b/pyresttest/resttest.py @@ -68,7 +68,6 @@ - Collect and report on test/benchmark results - Perform analysis on benchmark results """ -HEADER_ENCODING ='ISO-8859-1' # Per RFC 2616 LOGGING_LEVELS = {'debug': logging.DEBUG, 'info': logging.INFO, 'warning': logging.WARNING, @@ -175,139 +174,7 @@ def read_file(path): return string -def run_test(mytest, test_config=TestConfig(), context=None, curl_handle=None, *args, **kwargs): - """ Put together test pieces: configure & run actual test, return results """ - # Initialize a context if not supplied - my_context = context - if my_context is None: - my_context = Context() - - mytest.update_context_before(my_context) - templated_test = mytest.realize(my_context) - curl = templated_test.configure_curl( - timeout=test_config.timeout, context=my_context, curl_handle=curl_handle) - result = TestResponse() - result.test = templated_test - - # reset the body, it holds values from previous runs otherwise - headers = MyIO() - body = MyIO() - curl.setopt(pycurl.WRITEFUNCTION, body.write) - curl.setopt(pycurl.HEADERFUNCTION, headers.write) - if test_config.verbose: - curl.setopt(pycurl.VERBOSE, True) - if test_config.ssl_insecure: - curl.setopt(pycurl.SSL_VERIFYPEER, 0) - curl.setopt(pycurl.SSL_VERIFYHOST, 0) - - result.passed = None - - if test_config.interactive: - print("===================================") - print("%s" % mytest.name) - print("-----------------------------------") - print("REQUEST:") - print("%s %s" % (templated_test.method, templated_test.url)) - print("HEADERS:") - print("%s" % (templated_test.headers)) - if mytest.body is not None: - print("\n%s" % templated_test.body) - raw_input("Press ENTER when ready (%d): " % (mytest.delay)) - - if mytest.delay > 0: - print("Delaying for %ds" % mytest.delay) - time.sleep(mytest.delay) - - try: - curl.perform() # Run the actual call - except Exception as e: - # Curl exception occurred (network error), do not pass go, do not - # collect $200 - trace = traceback.format_exc() - result.failures.append(Failure(message="Curl Exception: {0}".format( - e), details=trace, 
failure_type=validators.FAILURE_CURL_EXCEPTION)) - result.passed = False - curl.close() - return result - - # Retrieve values - result.body = body.getvalue() - body.close() - result.response_headers = text_type(headers.getvalue(), HEADER_ENCODING) # Per RFC 2616 - headers.close() - - response_code = curl.getinfo(pycurl.RESPONSE_CODE) - result.response_code = response_code - - logger.debug("Initial Test Result, based on expected response code: " + - str(response_code in mytest.expected_status)) - - if response_code in mytest.expected_status: - result.passed = True - else: - # Invalid response code - result.passed = False - failure_message = "Invalid HTTP response code: response code {0} not in expected codes [{1}]".format( - response_code, mytest.expected_status) - result.failures.append(Failure( - message=failure_message, details=None, failure_type=validators.FAILURE_INVALID_RESPONSE)) - - # Parse HTTP headers - try: - result.response_headers = parse_headers(result.response_headers) - except Exception as e: - trace = traceback.format_exc() - result.failures.append(Failure(message="Header parsing exception: {0}".format( - e), details=trace, failure_type=validators.FAILURE_TEST_EXCEPTION)) - result.passed = False - curl.close() - return result - - # print str(test_config.print_bodies) + ',' + str(not result.passed) + ' , - # ' + str(test_config.print_bodies or not result.passed) - - head = result.response_headers - - # execute validator on body - if result.passed is True: - body = result.body - if mytest.validators is not None and isinstance(mytest.validators, list): - logger.debug("executing this many validators: " + - str(len(mytest.validators))) - failures = result.failures - for validator in mytest.validators: - validate_result = validator.validate( - body=body, headers=head, context=my_context) - if not validate_result: - result.passed = False - # Proxy for checking if it is a Failure object, because of - # import issues with isinstance there - if hasattr(validate_result, 'details'): - failures.append(validate_result) - # TODO add printing of validation for interactive mode - else: - logger.debug("no validators found") - - # Only do context updates if test was successful - mytest.update_context_after(result.body, head, my_context) - - # Print response body if override is set to print all *OR* if test failed - # (to capture maybe a stack trace) - if test_config.print_bodies or not result.passed: - if test_config.interactive: - print("RESPONSE:") - print(result.body.decode(ESCAPE_DECODING)) - - if test_config.print_headers or not result.passed: - if test_config.interactive: - print("RESPONSE HEADERS:") - print(result.response_headers) - - # TODO add string escape on body output - logger.debug(result) - - return result def run_benchmark(benchmark, test_config=TestConfig(), context=None, *args, **kwargs): @@ -486,6 +353,10 @@ def log_failure(failure, context=None, test_config=TestConfig()): if failure.details: logger.error("Validator/Error details:" + str(failure.details)) +class LoggerCallbacks(MacroCallbacks): + """ Uses a standard python logger """ + def log_intermediate(self, input): + logger.debug(input) def run_testsets(testsets): """ Execute a set of tests, using given TestSet list input """ @@ -494,6 +365,10 @@ def run_testsets(testsets): total_failures = 0 myinteractive = False curl_handle = pycurl.Curl() + + # Invoked during macro execution to report results + # FIXME I need to set up for logging before/after/during requests + callbacks = LoggerCallbacks() for testset in testsets: 
mytests = testset.tests @@ -523,7 +398,7 @@ def run_testsets(testsets): group_results[test.group] = list() group_failure_counts[test.group] = 0 - result = run_test(test, test_config=myconfig, context=context, curl_handle=curl_handle) + result = test.execute_macro(test_config=myconfig, context=context, curl_handle=curl_handle) result.body = None # Remove the body, save some memory! if not result.passed: # Print failure, increase failure counts for that test group diff --git a/pyresttest/tests.py b/pyresttest/tests.py index 56365fce..4eca0436 100644 --- a/pyresttest/tests.py +++ b/pyresttest/tests.py @@ -5,12 +5,14 @@ import pycurl import sys - +from .binding import Context from . import contenthandling from .contenthandling import ContentHandler from . import validators +from .validators import Failure from . import parsing from .parsing import * +from .macros import * # Find the best implementation available on this platform try: @@ -286,6 +288,142 @@ def __init__(self): def __str__(self): return json.dumps(self, default=safe_to_json) + def execute_macro(self, test_config=TestConfig(), context=None, cmdline_args=None, callbacks=MacroCallbacks(), curl_handle=None, *args, **kwargs): + """ Put together test pieces: configure & run actual test, return results """ + + # Initialize a context if not supplied + my_context = context + if my_context is None: + my_context = Context() + + mytest=self + + mytest.update_context_before(my_context) + templated_test = mytest.realize(my_context) + curl = templated_test.configure_curl( + timeout=test_config.timeout, context=my_context, curl_handle=curl_handle) + result = TestResponse() + result.test = templated_test + + # reset the body, it holds values from previous runs otherwise + headers = MyIO() + body = MyIO() + curl.setopt(pycurl.WRITEFUNCTION, body.write) + curl.setopt(pycurl.HEADERFUNCTION, headers.write) + if test_config.verbose: + curl.setopt(pycurl.VERBOSE, True) + if test_config.ssl_insecure: + curl.setopt(pycurl.SSL_VERIFYPEER, 0) + curl.setopt(pycurl.SSL_VERIFYHOST, 0) + + result.passed = None + + if test_config.interactive: + print("===================================") + print("%s" % mytest.name) + print("-----------------------------------") + print("REQUEST:") + print("%s %s" % (templated_test.method, templated_test.url)) + print("HEADERS:") + print("%s" % (templated_test.headers)) + if mytest.body is not None: + print("\n%s" % templated_test.body) + raw_input("Press ENTER when ready (%d): " % (mytest.delay)) + + if mytest.delay > 0: + print("Delaying for %ds" % mytest.delay) + time.sleep(mytest.delay) + + try: + curl.perform() # Run the actual call + except Exception as e: + # Curl exception occurred (network error), do not pass go, do not + # collect $200 + trace = traceback.format_exc() + result.failures.append(Failure(message="Curl Exception: {0}".format( + e), details=trace, failure_type=validators.FAILURE_CURL_EXCEPTION)) + result.passed = False + curl.close() + return result + + # Retrieve values + result.body = body.getvalue() + body.close() + result.response_headers = text_type(headers.getvalue(), HEADER_ENCODING) # Per RFC 2616 + headers.close() + + response_code = curl.getinfo(pycurl.RESPONSE_CODE) + result.response_code = response_code + + callbacks.log_intermediate("Initial Test Result, based on expected response code: " + + str(response_code in mytest.expected_status)) + + if response_code in mytest.expected_status: + result.passed = True + else: + # Invalid response code + result.passed = False + failure_message = "Invalid 
HTTP response code: response code {0} not in expected codes [{1}]".format( + response_code, mytest.expected_status) + result.failures.append(Failure( + message=failure_message, details=None, failure_type=validators.FAILURE_INVALID_RESPONSE)) + + # Parse HTTP headers + try: + result.response_headers = parse_headers(result.response_headers) + except Exception as e: + trace = traceback.format_exc() + result.failures.append(Failure(message="Header parsing exception: {0}".format( + e), details=trace, failure_type=validators.FAILURE_TEST_EXCEPTION)) + result.passed = False + curl.close() + return result + + # print str(test_config.print_bodies) + ',' + str(not result.passed) + ' , + # ' + str(test_config.print_bodies or not result.passed) + + head = result.response_headers + + # execute validator on body + if result.passed is True: + body = result.body + if mytest.validators is not None and isinstance(mytest.validators, list): + callbacks.log_intermediate("executing this many validators: " + + str(len(mytest.validators))) + failures = result.failures + for validator in mytest.validators: + validate_result = validator.validate( + body=body, headers=head, context=my_context) + if not validate_result: + result.passed = False + # Proxy for checking if it is a Failure object, because of + # import issues with isinstance there + if hasattr(validate_result, 'details'): + failures.append(validate_result) + # TODO add printing of validation for interactive mode + else: + callbacks.log_intermediate("no validators found") + + # Only do context updates if test was successful + mytest.update_context_after(result.body, head, my_context) + + # Print response body if override is set to print all *OR* if test failed + # (to capture maybe a stack trace) + if test_config.print_bodies or not result.passed: + if test_config.interactive: + print("RESPONSE:") + print(result.body.decode(ESCAPE_DECODING)) + + if test_config.print_headers or not result.passed: + if test_config.interactive: + print("RESPONSE HEADERS:") + print(result.response_headers) + + # TODO add string escape on body output + callbacks.log_intermediate(result) + + return result + def configure_curl(self, timeout=DEFAULT_TIMEOUT, context=None, curl_handle=None): """ Create and mostly configure a curl object for test, reusing existing if possible """ From 2f7a804c4864c971edf3e2eb990f31fd9057b0a8 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Thu, 10 Mar 2016 19:26:47 -0500 Subject: [PATCH 03/14] Move benchmark execution method into benchmark macro --- profile_benchmark.py | 2 +- pyresttest/benchmarks.py | 171 ++++++++++++++++++++++++++++++++ pyresttest/functionaltest.py | 5 +- pyresttest/macros.py | 11 ++- pyresttest/resttest.py | 179 +--------------------------------- pyresttest/test_benchmarks.py | 48 +++++++++ pyresttest/test_resttest.py | 83 ---------------- 7 files changed, 233 insertions(+), 266 deletions(-) diff --git a/profile_benchmark.py b/profile_benchmark.py index dc2f3eb2..96b4f0a4 100755 --- a/profile_benchmark.py +++ b/profile_benchmark.py @@ -34,4 +34,4 @@ context.add_generator('gen', factory_generate_ids(starting_id=10)()) test.generator_binds = {'id': 'gen'} print 'Running templated PUT test' -cProfile.run('resttest.run_benchmark(test, context=context)', sort='cumtime') +cProfile.run('test.execute_macro(context=context)', sort='cumtime') diff --git a/pyresttest/benchmarks.py b/pyresttest/benchmarks.py index f81a91fb..5704e47c 100644 --- a/pyresttest/benchmarks.py +++ b/pyresttest/benchmarks.py @@ -3,10 +3,12 @@ import pycurl import 
sys +from .binding import Context from . import tests from .tests import Test from . import parsing from .parsing import * +from .macros import * # Python 2/3 switches if sys.version_info[0] > 2: @@ -184,6 +186,91 @@ def __init__(self): def __str__(self): return json.dumps(self, default=safe_to_json) + def execute_macro(self, context=None, test_config=TestConfig(), cmdline_args=None, callbacks=MacroCallbacks(), *args, **kwargs): + """ Perform a benchmark, (re)using a given, configured CURL call to do so + The actual analysis of metrics is performed separately, to allow for testing + """ + + benchmark = self + + # Context handling + my_context = context + if my_context is None: + my_context = Context() + + warmup_runs = benchmark.warmup_runs + benchmark_runs = benchmark.benchmark_runs + message = '' # Message is name of benchmark... print it? + + if (benchmark_runs <= 0): + raise Exception( + "Invalid number of benchmark runs, must be > 0 :" + benchmark_runs) + + result = TestResponse() + + # TODO create and use a curl-returning configuration function + # TODO create and use a post-benchmark cleanup function + # They should use is_dynamic/is_context_modifier to determine if they need to + # worry about context and re-reading/retemplating and only do it if needed + # - Also, they will need to be smart enough to handle extraction functions + # For performance reasons, we don't want to re-run templating/extraction if + # we do not need to, and do not want to save request bodies. + + # Initialize variables to store output + output = BenchmarkResult() + output.name = benchmark.name + output.group = benchmark.group + metricnames = list(benchmark.metrics) + # Metric variable for curl, to avoid hash lookup for every metric name + metricvalues = [METRICS[name] for name in metricnames] + # Initialize arrays to store results for each metric + results = [list() for x in xrange(0, len(metricnames))] + curl = pycurl.Curl() + + # Benchmark warm-up to allow for caching, JIT compiling, on client + callbacks.log_status('Warmup: ' + message + ' started') + for x in xrange(0, warmup_runs): + benchmark.update_context_before(my_context) + templated = benchmark.realize(my_context) + curl = templated.configure_curl( + timeout=test_config.timeout, context=my_context, curl_handle=curl) + # Do not store actual response body at all. + curl.setopt(pycurl.WRITEFUNCTION, lambda x: None) + curl.perform() + + callbacks.log_status('Warmup: ' + message + ' finished') + + callbacks.log_status('Benchmark: ' + message + ' starting') + + for x in xrange(0, benchmark_runs): # Run the actual benchmarks + # Setup benchmark + benchmark.update_context_before(my_context) + templated = benchmark.realize(my_context) + curl = templated.configure_curl( + timeout=test_config.timeout, context=my_context, curl_handle=curl) + # Do not store actual response body at all. 
+ curl.setopt(pycurl.WRITEFUNCTION, lambda x: None) + + try: # Run the curl call, if it errors, then add to failure counts for benchmark + curl.perform() + except Exception: + output.failures = output.failures + 1 + curl.close() + curl = pycurl.Curl() + continue # Skip metrics collection + + # Get all metrics values for this run, and store to metric lists + for i in xrange(0, len(metricnames)): + results[i].append(curl.getinfo(metricvalues[i])) + + callbacks.log_status('Benchmark: ' + message + ' ending') + + temp_results = dict() + for i in xrange(0, len(metricnames)): + temp_results[metricnames[i]] = results[i] + output.results = temp_results + return analyze_benchmark_results(output, benchmark) + def realize_partial(self, context=None): """ Attempt to template out what is possible for this benchmark """ @@ -270,3 +357,87 @@ def parse_benchmark(base_url, node): "Invalid benchmark metric datatype: " + str(value)) return benchmark + +def analyze_benchmark_results(benchmark_result, benchmark): + """ Take a benchmark result containing raw benchmark results, and do aggregation by + applying functions + + Aggregates come out in format of metricname, aggregate_name, result """ + + output = BenchmarkResult() + output.name = benchmark_result.name + output.group = benchmark_result.group + output.failures = benchmark_result.failures + + # Copy raw metric arrays over where necessary + raw_results = benchmark_result.results + temp = dict() + for metric in benchmark.raw_metrics: + temp[metric] = raw_results[metric] + output.results = temp + + # Compute aggregates for each metric, and add tuples to aggregate results + aggregate_results = list() + for metricname, aggregate_list in benchmark.aggregated_metrics.items(): + numbers = raw_results[metricname] + for aggregate_name in aggregate_list: + if numbers: # Only compute aggregates if numbers exist + aggregate_function = AGGREGATES[aggregate_name] + aggregate_results.append( + (metricname, aggregate_name, aggregate_function(numbers))) + else: + aggregate_results.append((metricname, aggregate_name, None)) + + output.aggregates = aggregate_results + return output + + +def metrics_to_tuples(raw_metrics): + """ Converts metric dictionary of name:values_array into list of tuples + Use case: writing out benchmark to CSV, etc + + Input: + {'metric':[value1,value2...], 'metric2':[value1,value2,...]...} + + Output: list, with tuple header row, then list of tuples of values + [('metric','metric',...), (metric1_value1,metric2_value1, ...) ... 
] + """ + if not isinstance(raw_metrics, dict): + raise TypeError("Input must be dictionary!") + + metrics = sorted(raw_metrics.keys()) + arrays = [raw_metrics[metric] for metric in metrics] + + num_rows = len(arrays[0]) # Assume all same size or this fails + output = list() + output.append(tuple(metrics)) # Add headers + + # Create list of tuples mimicking 2D array from input + for row in xrange(0, num_rows): + new_row = tuple([arrays[col][row] for col in xrange(0, len(arrays))]) + output.append(new_row) + return output + + +def write_benchmark_json(file_out, benchmark_result, benchmark, test_config=TestConfig()): + """ Writes benchmark to file as json """ + json.dump(benchmark_result, file_out, default=safe_to_json) + + +def write_benchmark_csv(file_out, benchmark_result, benchmark, test_config=TestConfig()): + """ Writes benchmark to file as csv """ + writer = csv.writer(file_out) + writer.writerow(('Benchmark', benchmark_result.name)) + writer.writerow(('Benchmark Group', benchmark_result.group)) + writer.writerow(('Failures', benchmark_result.failures)) + + # Write result arrays + if benchmark_result.results: + writer.writerow(('Results', '')) + writer.writerows(metrics_to_tuples(benchmark_result.results)) + if benchmark_result.aggregates: + writer.writerow(('Aggregates', '')) + writer.writerows(benchmark_result.aggregates) + +# Method to call when writing benchmark file +OUTPUT_METHODS = {u'csv': write_benchmark_csv, u'json': write_benchmark_json} diff --git a/pyresttest/functionaltest.py b/pyresttest/functionaltest.py index 1608a0ba..4dbe5a31 100644 --- a/pyresttest/functionaltest.py +++ b/pyresttest/functionaltest.py @@ -11,6 +11,7 @@ from . import tests from .tests import Test +from . import benchmarks from . import binding from .binding import Context from . import resttest @@ -307,11 +308,11 @@ def test_unicode_use(self): def test_benchmark_get(self): """ Benchmark basic local get test """ - benchmark_config = resttest.Benchmark() + benchmark_config = benchmarks.Benchmark() benchmark_config.url = self.prefix + '/api/person/' benchmark_config.add_metric( 'total_time').add_metric('total_time', 'median') - benchmark_result = resttest.run_benchmark(benchmark_config) + benchmark_result = benchmark_config.execute_macro() print("Benchmark - median request time: " + str(benchmark_result.aggregates[0])) self.assertTrue(benchmark_config.benchmark_runs, len( diff --git a/pyresttest/macros.py b/pyresttest/macros.py index 40c8b7db..0c359952 100644 --- a/pyresttest/macros.py +++ b/pyresttest/macros.py @@ -24,13 +24,16 @@ def resolve_option(name, object_self, test_config, cmdline_args): return v return None -class MacroCallbacks(object): +class MacroCallbacks(object): # Possibly call this an execution context? 
""" Callbacks bundle to handle reporting """ + + # Logging outputs def start_macro(self, input): lambda x: None def end_macro(self, input): lambda x: None - def pre_request(self, input): lambda x: None - def post_request(self, input): lambda x: None - def log_intermediate(self, input): lambda x: None + def pre_request(self, input): lambda x: None # Called just before submitting requests + def post_request(self, input): lambda x: None # Called just after submitting requests + def log_status(self, input): lambda x: None # Logs status info + def log_intermediate(self, input): lambda x: None # Logs debug results while running class TestConfig(object): """ Configuration for a test run """ diff --git a/pyresttest/resttest.py b/pyresttest/resttest.py index 1635e3f7..85365651 100644 --- a/pyresttest/resttest.py +++ b/pyresttest/resttest.py @@ -18,7 +18,6 @@ except ImportError: from io import BytesIO as MyIO -ESCAPE_DECODING = 'string-escape' # Python 3 compatibility if sys.version_info[0] > 2: from past.builtins import basestring @@ -173,179 +172,6 @@ def read_file(path): f.close() return string - - - - -def run_benchmark(benchmark, test_config=TestConfig(), context=None, *args, **kwargs): - """ Perform a benchmark, (re)using a given, configured CURL call to do so - The actual analysis of metrics is performed separately, to allow for testing - """ - - # Context handling - my_context = context - if my_context is None: - my_context = Context() - - warmup_runs = benchmark.warmup_runs - benchmark_runs = benchmark.benchmark_runs - message = '' # Message is name of benchmark... print it? - - if (benchmark_runs <= 0): - raise Exception( - "Invalid number of benchmark runs, must be > 0 :" + benchmark_runs) - - result = TestResponse() - - # TODO create and use a curl-returning configuration function - # TODO create and use a post-benchmark cleanup function - # They should use is_dynamic/is_context_modifier to determine if they need to - # worry about context and re-reading/retemplating and only do it if needed - # - Also, they will need to be smart enough to handle extraction functions - # For performance reasons, we don't want to re-run templating/extraction if - # we do not need to, and do not want to save request bodies. - - # Initialize variables to store output - output = BenchmarkResult() - output.name = benchmark.name - output.group = benchmark.group - metricnames = list(benchmark.metrics) - # Metric variable for curl, to avoid hash lookup for every metric name - metricvalues = [METRICS[name] for name in metricnames] - # Initialize arrays to store results for each metric - results = [list() for x in xrange(0, len(metricnames))] - curl = pycurl.Curl() - - # Benchmark warm-up to allow for caching, JIT compiling, on client - logger.info('Warmup: ' + message + ' started') - for x in xrange(0, warmup_runs): - benchmark.update_context_before(my_context) - templated = benchmark.realize(my_context) - curl = templated.configure_curl( - timeout=test_config.timeout, context=my_context, curl_handle=curl) - # Do not store actual response body at all. 
- curl.setopt(pycurl.WRITEFUNCTION, lambda x: None) - curl.perform() - - logger.info('Warmup: ' + message + ' finished') - - logger.info('Benchmark: ' + message + ' starting') - - for x in xrange(0, benchmark_runs): # Run the actual benchmarks - # Setup benchmark - benchmark.update_context_before(my_context) - templated = benchmark.realize(my_context) - curl = templated.configure_curl( - timeout=test_config.timeout, context=my_context, curl_handle=curl) - # Do not store actual response body at all. - curl.setopt(pycurl.WRITEFUNCTION, lambda x: None) - - try: # Run the curl call, if it errors, then add to failure counts for benchmark - curl.perform() - except Exception: - output.failures = output.failures + 1 - curl.close() - curl = pycurl.Curl() - continue # Skip metrics collection - - # Get all metrics values for this run, and store to metric lists - for i in xrange(0, len(metricnames)): - results[i].append(curl.getinfo(metricvalues[i])) - - logger.info('Benchmark: ' + message + ' ending') - - temp_results = dict() - for i in xrange(0, len(metricnames)): - temp_results[metricnames[i]] = results[i] - output.results = temp_results - return analyze_benchmark_results(output, benchmark) - - -def analyze_benchmark_results(benchmark_result, benchmark): - """ Take a benchmark result containing raw benchmark results, and do aggregation by - applying functions - - Aggregates come out in format of metricname, aggregate_name, result """ - - output = BenchmarkResult() - output.name = benchmark_result.name - output.group = benchmark_result.group - output.failures = benchmark_result.failures - - # Copy raw metric arrays over where necessary - raw_results = benchmark_result.results - temp = dict() - for metric in benchmark.raw_metrics: - temp[metric] = raw_results[metric] - output.results = temp - - # Compute aggregates for each metric, and add tuples to aggregate results - aggregate_results = list() - for metricname, aggregate_list in benchmark.aggregated_metrics.items(): - numbers = raw_results[metricname] - for aggregate_name in aggregate_list: - if numbers: # Only compute aggregates if numbers exist - aggregate_function = AGGREGATES[aggregate_name] - aggregate_results.append( - (metricname, aggregate_name, aggregate_function(numbers))) - else: - aggregate_results.append((metricname, aggregate_name, None)) - - output.aggregates = aggregate_results - return output - - -def metrics_to_tuples(raw_metrics): - """ Converts metric dictionary of name:values_array into list of tuples - Use case: writing out benchmark to CSV, etc - - Input: - {'metric':[value1,value2...], 'metric2':[value1,value2,...]...} - - Output: list, with tuple header row, then list of tuples of values - [('metric','metric',...), (metric1_value1,metric2_value1, ...) ... 
] - """ - if not isinstance(raw_metrics, dict): - raise TypeError("Input must be dictionary!") - - metrics = sorted(raw_metrics.keys()) - arrays = [raw_metrics[metric] for metric in metrics] - - num_rows = len(arrays[0]) # Assume all same size or this fails - output = list() - output.append(tuple(metrics)) # Add headers - - # Create list of tuples mimicking 2D array from input - for row in xrange(0, num_rows): - new_row = tuple([arrays[col][row] for col in xrange(0, len(arrays))]) - output.append(new_row) - return output - - -def write_benchmark_json(file_out, benchmark_result, benchmark, test_config=TestConfig()): - """ Writes benchmark to file as json """ - json.dump(benchmark_result, file_out, default=safe_to_json) - - -def write_benchmark_csv(file_out, benchmark_result, benchmark, test_config=TestConfig()): - """ Writes benchmark to file as csv """ - writer = csv.writer(file_out) - writer.writerow(('Benchmark', benchmark_result.name)) - writer.writerow(('Benchmark Group', benchmark_result.group)) - writer.writerow(('Failures', benchmark_result.failures)) - - # Write result arrays - if benchmark_result.results: - writer.writerow(('Results', '')) - writer.writerows(metrics_to_tuples(benchmark_result.results)) - if benchmark_result.aggregates: - writer.writerow(('Aggregates', '')) - writer.writerows(benchmark_result.aggregates) - -# Method to call when writing benchmark file -OUTPUT_METHODS = {u'csv': write_benchmark_csv, u'json': write_benchmark_json} - - def log_failure(failure, context=None, test_config=TestConfig()): """ Log a failure from a test """ logger.error("Test Failure, failure type: {0}, Reason: {1}".format( @@ -355,6 +181,8 @@ def log_failure(failure, context=None, test_config=TestConfig()): class LoggerCallbacks(MacroCallbacks): """ Uses a standard python logger """ + def log_status(self, input): + logger.info(input) def log_intermediate(self, input): logger.debug(input) @@ -438,8 +266,7 @@ def run_testsets(testsets): logger.info("Benchmark Starting: " + benchmark.name + " Group: " + benchmark.group) - benchmark_result = run_benchmark( - benchmark, myconfig, context=context) + benchmark_result = benchmark.execute_macro(test_config=myconfig, context=context) print(benchmark_result) logger.info("Benchmark Done: " + benchmark.name + " Group: " + benchmark.group) diff --git a/pyresttest/test_benchmarks.py b/pyresttest/test_benchmarks.py index 70177e7c..95e5c225 100644 --- a/pyresttest/test_benchmarks.py +++ b/pyresttest/test_benchmarks.py @@ -112,6 +112,54 @@ def test_add_metric(self): self.assertEqual(2, len(benchmark_config.raw_metrics)) self.assertEqual(2, len(benchmark_config.aggregated_metrics.keys())) + def test_analyze_benchmark(self): + """ Test analyzing benchmarks to compute aggregates """ + benchmark_result = BenchmarkResult() + benchmark_config = Benchmark() + benchmark_config.add_metric('request_size').add_metric( + 'request_size', 'median') + benchmark_config.add_metric('connect_time') + benchmark_config.add_metric('total_time', 'mean_harmonic') + benchmark_config.add_metric('total_time', 'std_deviation') + + benchmark_result.results = { + 'connect_time': [1, 4, 7], + 'request_size': [7, 8, 10], + 'total_time': [0.5, 0.7, 0.9] + } + + analyzed = analyze_benchmark_results( + benchmark_result, benchmark_config) + self.assertEqual(2, len(analyzed.results.keys())) + + # Check that number of measurements is sane + distinct_metrics = set([x[0] for x in analyzed.aggregates]) + distinct_aggregates = set([x[1] for x in analyzed.aggregates]) + self.assertEqual(2, 
len(distinct_metrics)) + self.assertEqual(3, len(distinct_aggregates)) + self.assertEqual(3, len(analyzed.aggregates)) + + def test_metrics_to_tuples(self): + """ Test method to build list(tuples) from raw metrics """ + array1 = [-1, 5.6, 0] + array2 = [3.2, -81, 800] + array3 = [97, -3.4, 'cheese'] + keys = sorted(['blah', 'foo', 'bar']) + metrics = {keys[0]: array1, keys[1]: array2, keys[2]: array3} + + packed = metrics_to_tuples(metrics) + headers = packed[0] + + # Check header generation + for x in xrange(0, len(keys)): + self.assertEqual(keys[x], headers[x]) + + # Check data was correctly converted to 2D format, in order of input + for x in xrange(1, len(array1) + 1): + my_tuple = packed[x] + self.assertEqual(array1[x - 1], my_tuple[0]) + self.assertEqual(array2[x - 1], my_tuple[1]) + self.assertEqual(array3[x - 1], my_tuple[2]) if __name__ == '__main__': unittest.main() diff --git a/pyresttest/test_resttest.py b/pyresttest/test_resttest.py index 8b2c7e27..6f2b9dfc 100644 --- a/pyresttest/test_resttest.py +++ b/pyresttest/test_resttest.py @@ -11,89 +11,6 @@ class TestRestTest(unittest.TestCase): """ Tests to test overall REST testing framework, how meta is that? """ - def test_analyze_benchmark(self): - """ Test analyzing benchmarks to compute aggregates """ - benchmark_result = BenchmarkResult() - benchmark_config = Benchmark() - benchmark_config.add_metric('request_size').add_metric( - 'request_size', 'median') - benchmark_config.add_metric('connect_time') - benchmark_config.add_metric('total_time', 'mean_harmonic') - benchmark_config.add_metric('total_time', 'std_deviation') - - benchmark_result.results = { - 'connect_time': [1, 4, 7], - 'request_size': [7, 8, 10], - 'total_time': [0.5, 0.7, 0.9] - } - - analyzed = analyze_benchmark_results( - benchmark_result, benchmark_config) - self.assertEqual(2, len(analyzed.results.keys())) - - # Check that number of measurements is sane - distinct_metrics = set([x[0] for x in analyzed.aggregates]) - distinct_aggregates = set([x[1] for x in analyzed.aggregates]) - self.assertEqual(2, len(distinct_metrics)) - self.assertEqual(3, len(distinct_aggregates)) - self.assertEqual(3, len(analyzed.aggregates)) - - def test_metrics_to_tuples(self): - """ Test method to build list(tuples) from raw metrics """ - array1 = [-1, 5.6, 0] - array2 = [3.2, -81, 800] - array3 = [97, -3.4, 'cheese'] - keys = sorted(['blah', 'foo', 'bar']) - metrics = {keys[0]: array1, keys[1]: array2, keys[2]: array3} - - packed = metrics_to_tuples(metrics) - headers = packed[0] - - # Check header generation - for x in xrange(0, len(keys)): - self.assertEqual(keys[x], headers[x]) - - # Check data was correctly converted to 2D format, in order of input - for x in xrange(1, len(array1) + 1): - my_tuple = packed[x] - self.assertEqual(array1[x - 1], my_tuple[0]) - self.assertEqual(array2[x - 1], my_tuple[1]) - self.assertEqual(array3[x - 1], my_tuple[2]) - - def test_parse_headers(self): - """ Basic header parsing tests """ - headerstring = u'HTTP/1.1 200 OK\r\nDate: Mon, 29 Dec 2014 02:42:33 GMT\r\nExpires: -1\r\nCache-Control: private, max-age=0\r\nContent-Type: text/html; charset=ISO-8859-1\r\nX-XSS-Protection: 1; mode=block\r\nX-Frame-Options: SAMEORIGIN\r\nAlternate-Protocol: 80:quic,p=0.02\r\nTransfer-Encoding: chunked\r\n\r\n' - header_list = resttest.parse_headers(headerstring) - header_dict = dict(header_list) - - self.assertTrue(isinstance(header_list, list)) - self.assertEqual('-1', header_dict['expires']) - self.assertEqual('private, max-age=0', 
header_dict['cache-control']) - self.assertEqual(8, len(header_dict)) - - # Error cases - # No headers - result = resttest.parse_headers("") # Shouldn't throw exception - self.assertTrue(isinstance(result, list)) - self.assertEqual(0, len(result)) - - # Just the HTTP prefix - result = resttest.parse_headers( - 'HTTP/1.1 200 OK\r\n\r\n') # Shouldn't throw exception - self.assertTrue(isinstance(result, list)) - self.assertEqual(0, len(result)) - - def test_parse_headers_multiples(self): - """ Test headers where there are duplicate values set """ - headerstring = u'HTTP/1.1 200 OK\r\nDate: Mon, 29 Dec 2014 02:42:33 GMT\r\nAccept: text/html\r\nAccept: application/json\r\n\r\n' - headers = resttest.parse_headers(headerstring) - - self.assertTrue(isinstance(headers, list)) - self.assertEqual(3, len(headers)) - self.assertEqual(('date', 'Mon, 29 Dec 2014 02:42:33 GMT'), headers[0]) - self.assertEqual(('accept', 'text/html'), headers[1]) - self.assertEqual(('accept', 'application/json'), headers[2]) - def test_jmespath_import(self): """ Verify that JMESPath extractor loads if class present """ From cb0188a33b65b7ce292623118a5c7158763ee6f3 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Thu, 10 Mar 2016 19:54:13 -0500 Subject: [PATCH 04/14] Rename TestConfig to TestSetConfig --- advanced_guide.md | 2 +- pyresttest/benchmarks.py | 10 +++++----- pyresttest/macros.py | 40 ++++++++++++++++++++++------------------ pyresttest/resttest.py | 20 ++++++++++---------- pyresttest/test_tests.py | 20 ++++++++++---------- pyresttest/tests.py | 22 +++++++++++----------- 6 files changed, 59 insertions(+), 55 deletions(-) diff --git a/advanced_guide.md b/advanced_guide.md index ae411333..d3ad8125 100644 --- a/advanced_guide.md +++ b/advanced_guide.md @@ -402,7 +402,7 @@ Validate against a schema in file 'miniapp-schema.json' 1. Parse command line arguments 2. Parse YAML, reading top-level imports and building TestSets 3. Execute TestSets: - 1. Generate a Context for each test set, populated with generators and variables defined in the TestConfig + 1. Generate a Context for each test set, populated with generators and variables defined in the TestSetConfig 2. Run each test in the test set, using the context, per the test lifecycle below * Print failures as they occur 3. Add statistics from that test to information for that test's group diff --git a/pyresttest/benchmarks.py b/pyresttest/benchmarks.py index 5704e47c..2d7635b4 100644 --- a/pyresttest/benchmarks.py +++ b/pyresttest/benchmarks.py @@ -186,7 +186,7 @@ def __init__(self): def __str__(self): return json.dumps(self, default=safe_to_json) - def execute_macro(self, context=None, test_config=TestConfig(), cmdline_args=None, callbacks=MacroCallbacks(), *args, **kwargs): + def execute_macro(self, context=None, testset_config=TestSetConfig(), cmdline_args=None, callbacks=MacroCallbacks(), *args, **kwargs): """ Perform a benchmark, (re)using a given, configured CURL call to do so The actual analysis of metrics is performed separately, to allow for testing """ @@ -233,7 +233,7 @@ def execute_macro(self, context=None, test_config=TestConfig(), cmdline_args=Non benchmark.update_context_before(my_context) templated = benchmark.realize(my_context) curl = templated.configure_curl( - timeout=test_config.timeout, context=my_context, curl_handle=curl) + timeout=testset_config.timeout, context=my_context, curl_handle=curl) # Do not store actual response body at all. 
curl.setopt(pycurl.WRITEFUNCTION, lambda x: None) curl.perform() @@ -247,7 +247,7 @@ def execute_macro(self, context=None, test_config=TestConfig(), cmdline_args=Non benchmark.update_context_before(my_context) templated = benchmark.realize(my_context) curl = templated.configure_curl( - timeout=test_config.timeout, context=my_context, curl_handle=curl) + timeout=testset_config.timeout, context=my_context, curl_handle=curl) # Do not store actual response body at all. curl.setopt(pycurl.WRITEFUNCTION, lambda x: None) @@ -419,12 +419,12 @@ def metrics_to_tuples(raw_metrics): return output -def write_benchmark_json(file_out, benchmark_result, benchmark, test_config=TestConfig()): +def write_benchmark_json(file_out, benchmark_result, benchmark, testset_config=TestSetConfig()): """ Writes benchmark to file as json """ json.dump(benchmark_result, file_out, default=safe_to_json) -def write_benchmark_csv(file_out, benchmark_result, benchmark, test_config=TestConfig()): +def write_benchmark_csv(file_out, benchmark_result, benchmark, testset_config=TestSetConfig()): """ Writes benchmark to file as csv """ writer = csv.writer(file_out) writer.writerow(('Benchmark', benchmark_result.name)) diff --git a/pyresttest/macros.py b/pyresttest/macros.py index 0c359952..c2802970 100644 --- a/pyresttest/macros.py +++ b/pyresttest/macros.py @@ -15,10 +15,10 @@ DEFAULT_TIMEOUT = 10 # Seconds, FIXME remove from the tests class and move to here HEADER_ENCODING ='ISO-8859-1' # Per RFC 2616 -def resolve_option(name, object_self, test_config, cmdline_args): +def resolve_option(name, object_self, testset_config, cmdline_args): """ Look for a specific field name in a set of objects return value if found, return none if not found """ - for i in (object_self, test_config, cmdline_args): + for i in (object_self, testset_config, cmdline_args): v = gettattr(i, name, None) if v is not None: return v @@ -27,15 +27,19 @@ def resolve_option(name, object_self, test_config, cmdline_args): class MacroCallbacks(object): # Possibly call this an execution context? 
""" Callbacks bundle to handle reporting """ - # Logging outputs + # Logging outputs, these are part of the lifecycle def start_macro(self, input): lambda x: None - def end_macro(self, input): lambda x: None def pre_request(self, input): lambda x: None # Called just before submitting requests def post_request(self, input): lambda x: None # Called just after submitting requests + def end_macro(self, input): lambda x: None + + # These can be called at any point, theoretically + def log_success(self, input): lambda x: None + def log_failure(self, input): lambda x: None def log_status(self, input): lambda x: None # Logs status info def log_intermediate(self, input): lambda x: None # Logs debug results while running -class TestConfig(object): +class TestSetConfig(object): """ Configuration for a test run """ timeout = DEFAULT_TIMEOUT # timeout of tests, in seconds print_bodies = False # Print response bodies in all cases @@ -58,10 +62,10 @@ class TestSet(object): """ Encapsulates a set of tests and test configuration for them """ tests = list() benchmarks = list() - config = TestConfig() + config = TestSetConfig() def __init__(self): - self.config = TestConfig() + self.config = TestSetConfig() self.tests = list() self.benchmarks = list() @@ -129,29 +133,29 @@ def parse_headers(header_string): def parse_configuration(node, base_config=None): """ Parse input config to configuration information """ - test_config = base_config - if not test_config: - test_config = TestConfig() + testset_config = base_config + if not testset_config: + testset_config = TestSetConfig() node = lowercase_keys(flatten_dictionaries(node)) # Make it usable for key, value in node.items(): if key == u'timeout': - test_config.timeout = int(value) + testset_config.timeout = int(value) elif key == u'print_bodies': - test_config.print_bodies = safe_to_bool(value) + testset_config.print_bodies = safe_to_bool(value) elif key == u'retries': - test_config.retries = int(value) + testset_config.retries = int(value) elif key == u'variable_binds': - if not test_config.variable_binds: - test_config.variable_binds = dict() - test_config.variable_binds.update(flatten_dictionaries(value)) + if not testset_config.variable_binds: + testset_config.variable_binds = dict() + testset_config.variable_binds.update(flatten_dictionaries(value)) elif key == u'generators': flat = flatten_dictionaries(value) gen_map = dict() for generator_name, generator_config in flat.items(): gen = parse_generator(generator_config) gen_map[str(generator_name)] = gen - test_config.generators = gen_map + testset_config.generators = gen_map - return test_config \ No newline at end of file + return testset_config \ No newline at end of file diff --git a/pyresttest/resttest.py b/pyresttest/resttest.py index 85365651..85cb24a3 100644 --- a/pyresttest/resttest.py +++ b/pyresttest/resttest.py @@ -116,7 +116,7 @@ def parse_testsets(base_url, test_structure, test_files=set(), working_directory """ tests_out = list() - test_config = TestConfig() + testset_config = TestSetConfig() testsets = list() benchmarks = list() @@ -124,7 +124,7 @@ def parse_testsets(base_url, test_structure, test_files=set(), working_directory working_directory = os.path.abspath(os.getcwd()) if vars and isinstance(vars, dict): - test_config.variable_binds = vars + testset_config.variable_binds = vars # returns a testconfig and collection of tests for node in test_structure: # Iterate through lists of test and configuration elements @@ -156,11 +156,11 @@ def parse_testsets(base_url, test_structure, 
test_files=set(), working_directory benchmark = parse_benchmark(base_url, node[key]) benchmarks.append(benchmark) elif key == u'config' or key == u'configuration': - test_config = parse_configuration( - node[key], base_config=test_config) + testset_config = parse_configuration( + node[key], base_config=testset_config) testset = TestSet() testset.tests = tests_out - testset.config = test_config + testset.config = testset_config testset.benchmarks = benchmarks testsets.append(testset) return testsets @@ -172,7 +172,7 @@ def read_file(path): f.close() return string -def log_failure(failure, context=None, test_config=TestConfig()): +def log_failure(failure, context=None, testset_config=TestSetConfig()): """ Log a failure from a test """ logger.error("Test Failure, failure type: {0}, Reason: {1}".format( failure.failure_type, failure.message)) @@ -226,7 +226,7 @@ def run_testsets(testsets): group_results[test.group] = list() group_failure_counts[test.group] = 0 - result = test.execute_macro(test_config=myconfig, context=context, curl_handle=curl_handle) + result = test.execute_macro(testset_config=myconfig, context=context, curl_handle=curl_handle) result.body = None # Remove the body, save some memory! if not result.passed: # Print failure, increase failure counts for that test group @@ -238,7 +238,7 @@ def run_testsets(testsets): if result.failures: for failure in result.failures: log_failure(failure, context=context, - test_config=myconfig) + testset_config=myconfig) # Increment test failure counts for that group (adding an entry # if not present) @@ -266,7 +266,7 @@ def run_testsets(testsets): logger.info("Benchmark Starting: " + benchmark.name + " Group: " + benchmark.group) - benchmark_result = benchmark.execute_macro(test_config=myconfig, context=context) + benchmark_result = benchmark.execute_macro(testset_config=myconfig, context=context) print(benchmark_result) logger.info("Benchmark Done: " + benchmark.name + " Group: " + benchmark.group) @@ -279,7 +279,7 @@ def run_testsets(testsets): logger.debug("Benchmark writing to file: " + benchmark.output_file) write_method(my_file, benchmark_result, - benchmark, test_config=myconfig) + benchmark, testset_config=myconfig) my_file.close() if myinteractive: diff --git a/pyresttest/test_tests.py b/pyresttest/test_tests.py index 6f8ea044..c96b7897 100644 --- a/pyresttest/test_tests.py +++ b/pyresttest/test_tests.py @@ -294,14 +294,14 @@ def test_parse_validators_fail(self): def test_parse_extractor_bind(self): """ Test parsing of extractors """ - test_config = { + testset_config = { "url": '/api', 'extract_binds': { 'id': {'jsonpath_mini': 'idfield'}, 'name': {'jsonpath_mini': 'firstname'} } } - test = Test.parse_test('', test_config) + test = Test.parse_test('', testset_config) self.assertTrue(test.extract_binds) self.assertEqual(2, len(test.extract_binds)) self.assertTrue('id' in test.extract_binds) @@ -317,29 +317,29 @@ def test_parse_extractor_bind(self): def test_parse_extractor_errors(self): """ Test that expected errors are thrown on parsing """ - test_config = { + testset_config = { "url": '/api', 'extract_binds': {'id': {}} } try: - test = Test.parse_test('', test_config) + test = Test.parse_test('', testset_config) self.fail("Should throw an error when doing empty mapping") except TypeError: pass - test_config['extract_binds']['id'] = { + testset_config['extract_binds']['id'] = { 'jsonpath_mini': 'query', 'test': 'anotherquery' } try: - test = Test.parse_test('', test_config) + test = Test.parse_test('', testset_config) 
self.fail("Should throw an error when given multiple extractors") except ValueError as te: pass def test_parse_validator_comparator(self): """ Test parsing a comparator validator """ - test_config = { + testset_config = { 'name': 'Default', 'url': '/api', 'validators': [ @@ -348,7 +348,7 @@ def test_parse_validator_comparator(self): 'expected': {'template': '$id'}}} ] } - test = Test.parse_test('', test_config) + test = Test.parse_test('', testset_config) self.assertTrue(test.validators) self.assertEqual(1, len(test.validators)) @@ -362,7 +362,7 @@ def test_parse_validator_comparator(self): def test_parse_validator_extract_test(self): """ Tests parsing extract-test validator """ - test_config = { + testset_config = { 'name': 'Default', 'url': '/api', 'validators': [ @@ -370,7 +370,7 @@ def test_parse_validator_extract_test(self): 'test': 'exists'}} ] } - test = Test.parse_test('', test_config) + test = Test.parse_test('', testset_config) self.assertTrue(test.validators) self.assertEqual(1, len(test.validators)) diff --git a/pyresttest/tests.py b/pyresttest/tests.py index 4eca0436..f08e5127 100644 --- a/pyresttest/tests.py +++ b/pyresttest/tests.py @@ -288,7 +288,7 @@ def __init__(self): def __str__(self): return json.dumps(self, default=safe_to_json) - def execute_macro(self, test_config=TestConfig(), context=None, cmdline_args=None, callbacks=MacroCallbacks(), curl_handle=None, *args, **kwargs): + def execute_macro(self, testset_config=TestSetConfig(), context=None, cmdline_args=None, callbacks=MacroCallbacks(), curl_handle=None, *args, **kwargs): """ Put together test pieces: configure & run actual test, return results """ # Initialize a context if not supplied @@ -301,7 +301,7 @@ def execute_macro(self, test_config=TestConfig(), context=None, cmdline_args=Non mytest.update_context_before(my_context) templated_test = mytest.realize(my_context) curl = templated_test.configure_curl( - timeout=test_config.timeout, context=my_context, curl_handle=curl_handle) + timeout=testset_config.timeout, context=my_context, curl_handle=curl_handle) result = TestResponse() result.test = templated_test @@ -310,15 +310,15 @@ def execute_macro(self, test_config=TestConfig(), context=None, cmdline_args=Non body = MyIO() curl.setopt(pycurl.WRITEFUNCTION, body.write) curl.setopt(pycurl.HEADERFUNCTION, headers.write) - if test_config.verbose: + if testset_config.verbose: curl.setopt(pycurl.VERBOSE, True) - if test_config.ssl_insecure: + if testset_config.ssl_insecure: curl.setopt(pycurl.SSL_VERIFYPEER, 0) curl.setopt(pycurl.SSL_VERIFYHOST, 0) result.passed = None - if test_config.interactive: + if testset_config.interactive: print("===================================") print("%s" % mytest.name) print("-----------------------------------") @@ -379,8 +379,8 @@ def execute_macro(self, test_config=TestConfig(), context=None, cmdline_args=Non curl.close() return result - # print str(test_config.print_bodies) + ',' + str(not result.passed) + ' , - # ' + str(test_config.print_bodies or not result.passed) + # print str(testset_config.print_bodies) + ',' + str(not result.passed) + ' , + # ' + str(testset_config.print_bodies or not result.passed) head = result.response_headers @@ -409,13 +409,13 @@ def execute_macro(self, test_config=TestConfig(), context=None, cmdline_args=Non # Print response body if override is set to print all *OR* if test failed # (to capture maybe a stack trace) - if test_config.print_bodies or not result.passed: - if test_config.interactive: + if testset_config.print_bodies or not result.passed: + 
if testset_config.interactive: print("RESPONSE:") print(result.body.decode(ESCAPE_DECODING)) - if test_config.print_headers or not result.passed: - if test_config.interactive: + if testset_config.print_headers or not result.passed: + if testset_config.interactive: print("RESPONSE HEADERS:") print(result.response_headers) From 491aea53e5b86f71167cf999bd9da3aecfbd4124 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Thu, 10 Mar 2016 20:01:58 -0500 Subject: [PATCH 05/14] Fix Py3 compat import issue in benchmarks, clean up imports --- pyresttest/benchmarks.py | 2 ++ pyresttest/macros.py | 4 ++++ pyresttest/resttest.py | 13 ------------- 3 files changed, 6 insertions(+), 13 deletions(-) diff --git a/pyresttest/benchmarks.py b/pyresttest/benchmarks.py index 2d7635b4..de80b7fd 100644 --- a/pyresttest/benchmarks.py +++ b/pyresttest/benchmarks.py @@ -2,6 +2,7 @@ import json import pycurl import sys +import csv from .binding import Context from . import tests @@ -13,6 +14,7 @@ # Python 2/3 switches if sys.version_info[0] > 2: from past.builtins import basestring + from builtins import range as xrange # Python 3 compatibility shims from . import six diff --git a/pyresttest/macros.py b/pyresttest/macros.py index c2802970..e2d6f3c2 100644 --- a/pyresttest/macros.py +++ b/pyresttest/macros.py @@ -12,6 +12,10 @@ # This is all our general execution framework stuff + HTTP request stuff ESCAPE_DECODING = 'string-escape' +# Python 2/3 switches +if sys.version_info[0] > 2: + ESCAPE_DECODING = 'unicode_escape' + DEFAULT_TIMEOUT = 10 # Seconds, FIXME remove from the tests class and move to here HEADER_ENCODING ='ISO-8859-1' # Per RFC 2616 diff --git a/pyresttest/resttest.py b/pyresttest/resttest.py index 85cb24a3..c3354783 100644 --- a/pyresttest/resttest.py +++ b/pyresttest/resttest.py @@ -5,24 +5,11 @@ import traceback import yaml import pycurl -import json -import csv import logging -import time - -try: - from cStringIO import StringIO as MyIO -except: - try: - from StringIO import StringIO as MyIO - except ImportError: - from io import BytesIO as MyIO # Python 3 compatibility if sys.version_info[0] > 2: from past.builtins import basestring - from builtins import range as xrange - ESCAPE_DECODING = 'unicode_escape' # Dirty hack to allow for running this as a script :-/ if __name__ == '__main__': From d5c12034446631cdb19d8da60ca6a5fedc8312e9 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Thu, 10 Mar 2016 21:23:00 -0500 Subject: [PATCH 06/14] Create a Macro parent object for simplified testing of test/benchmark logic --- pyresttest/macros.py | 48 ++++++++++++++++++++++++++++++++------- pyresttest/resttest.py | 4 ++++ pyresttest/test_macros.py | 39 +++++++++++++++++++++++++++++++ pyresttest/tests.py | 6 +---- 4 files changed, 84 insertions(+), 13 deletions(-) diff --git a/pyresttest/macros.py b/pyresttest/macros.py index e2d6f3c2..db200d1b 100644 --- a/pyresttest/macros.py +++ b/pyresttest/macros.py @@ -1,9 +1,3 @@ -import sys -from email import message_from_string # For headers handling - -from .generators import parse_generator -from .parsing import * - # Contains all the framework-general items for macros # This allows it to be separated from resttest.py # This way macros (test/benchmark/etc) can import shared methods @@ -11,6 +5,12 @@ # This is all our general execution framework stuff + HTTP request stuff +import sys +from email import message_from_string # For headers handling + +from .generators import parse_generator +from .parsing import * + ESCAPE_DECODING = 'string-escape' # Python 2/3 switches 
if sys.version_info[0] > 2: @@ -44,7 +44,7 @@ def log_status(self, input): lambda x: None # Logs status info def log_intermediate(self, input): lambda x: None # Logs debug results while running class TestSetConfig(object): - """ Configuration for a test run """ + """ Configuration shared across all tests in a testset """ timeout = DEFAULT_TIMEOUT # timeout of tests, in seconds print_bodies = False # Print response bodies in all cases print_headers = False # Print response bodies in all cases @@ -63,7 +63,9 @@ def __str__(self): return json.dumps(self, default=safe_to_json) class TestSet(object): - """ Encapsulates a set of tests and test configuration for them """ + """ Encapsulates a set of tests/benchmarks and test configuration for them + This is analogous to a unittest TestSuite + """ tests = list() benchmarks = list() config = TestSetConfig() @@ -76,6 +78,36 @@ def __init__(self): def __str__(self): return json.dumps(self, default=safe_to_json) +class Macro(object): + """ Common functionality used by tests, benchmarks, etc + Maps to a unittest TestCase, but only roughly + This is the parent class of a Test/Benchmark/etc + """ + + name = u'Unnamed' + macro_name = None + + def execute_macro(self, testset_config=TestSetConfig(), context=None, cmdline_args=None, callbacks=MacroCallbacks(), curl_handle=None, *args, **kwargs): + """ Skeletal execution basis """ + + callbacks.start_macro(self.name) + callbacks.pre_request('Pre-request: no request to run') + callbacks.post_request('Post-request: no request to run') + callbacks.log_success('Empty macro always succeeds') + callbacks.end_macro(self.name) + + def is_context_modifier(self): + """ If a macro does not modify the context, it can be executed in parallel """ + return False + + def is_dynamic(self): + """ Does the test use variables to template fields? If not, it can be executed with no templats """ + return False + + @staticmethod + def parse(config, *args, **kwargs): # TODO Wire me into testset parsing + """ Parses the supplied config object from YAML, using arguments and return configured instance """ + return None class BenchmarkResult(object): """ Stores results from a benchmark for reporting use """ diff --git a/pyresttest/resttest.py b/pyresttest/resttest.py index c3354783..ab8426de 100644 --- a/pyresttest/resttest.py +++ b/pyresttest/resttest.py @@ -172,6 +172,10 @@ def log_status(self, input): logger.info(input) def log_intermediate(self, input): logger.debug(input) + def log_failure(self, input): + logger.error(input) + def log_success(self, input): + logger.info(input) def run_testsets(testsets): """ Execute a set of tests, using given TestSet list input """ diff --git a/pyresttest/test_macros.py b/pyresttest/test_macros.py index ecf8790c..c5709251 100644 --- a/pyresttest/test_macros.py +++ b/pyresttest/test_macros.py @@ -1,9 +1,48 @@ import unittest +import sys +import inspect from . 
import macros from .macros import * +PYTHON_MAJOR_VERSION = sys.version_info[0] +if PYTHON_MAJOR_VERSION > 2: + from unittest import mock +else: + import mock + +class MockingCallbacks(MacroCallbacks): + """ Mocks out all the callbacks and tracks executions in lifecycle """ + + mymocks = None + + def __init__(self): + origmethods = inspect.getmembers(MacroCallbacks, predicate=inspect.ismethod) + self.mymocks = dict() + for method in origmethods: + newmock = mock.MagicMock(name=method[0], return_value=True) + self.mymocks[method[0]]=newmock + setattr(self, method[0], newmock) + + def list_called_methods(self): + """ Return all methods that have been invoked """ + v = filter(lambda x: self.mymocks[x].called == True, self.mymocks.keys()) + print v + return v + class TestMacros(unittest.TestCase): + + def test_empty_macro_callbacks(self): + """ Test of basic macro execution path """ + mymacro = Macro() + mymacro.name = 'Sample' + + mocked_callbacks = MockingCallbacks() + mymacro.execute_macro(callbacks=mocked_callbacks) + called_list = mocked_callbacks.list_called_methods() + self.assertEqual(set(['start_macro','end_macro','post_request', 'log_success','pre_request']), + set(called_list)) + def test_parse_headers(self): """ Basic header parsing tests """ headerstring = u'HTTP/1.1 200 OK\r\nDate: Mon, 29 Dec 2014 02:42:33 GMT\r\nExpires: -1\r\nCache-Control: private, max-age=0\r\nContent-Type: text/html; charset=ISO-8859-1\r\nX-XSS-Protection: 1; mode=block\r\nX-Frame-Options: SAMEORIGIN\r\nAlternate-Protocol: 80:quic,p=0.02\r\nTransfer-Encoding: chunked\r\n\r\n' diff --git a/pyresttest/tests.py b/pyresttest/tests.py index f08e5127..6b45b860 100644 --- a/pyresttest/tests.py +++ b/pyresttest/tests.py @@ -92,7 +92,7 @@ def coerce_list_of_ints(val): else: return [int(val)] -class Test(object): +class Test(Macro): """ Describes a REST test """ _url = None expected_status = [200] # expected HTTP status code or codes @@ -117,10 +117,6 @@ class Test(object): generator_binds = None # Dict of variable name and then generator name extract_binds = None # Dict of variable name and extract function to run - @staticmethod - def has_contains(): - return 'contains' in validators.VALIDATORS - def ninja_copy(self): """ Optimization: limited copy of test object, for realize() methods This only copies fields changed vs. class, and keeps methods the same From ed7b9366e8c92cdc462972069f54b85f89c5660f Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Thu, 10 Mar 2016 21:33:32 -0500 Subject: [PATCH 07/14] Use callbacks for logging everywhere, and revert to printing on callbacks if behavior not defined --- pyresttest/macros.py | 20 ++++++++++++-------- pyresttest/tests.py | 26 +++++++++++++------------- 2 files changed, 25 insertions(+), 21 deletions(-) diff --git a/pyresttest/macros.py b/pyresttest/macros.py index db200d1b..058b5769 100644 --- a/pyresttest/macros.py +++ b/pyresttest/macros.py @@ -31,17 +31,21 @@ def resolve_option(name, object_self, testset_config, cmdline_args): class MacroCallbacks(object): # Possibly call this an execution context? 
""" Callbacks bundle to handle reporting """ + def simple_print(self, x): + if x: + print(x) + # Logging outputs, these are part of the lifecycle - def start_macro(self, input): lambda x: None - def pre_request(self, input): lambda x: None # Called just before submitting requests - def post_request(self, input): lambda x: None # Called just after submitting requests - def end_macro(self, input): lambda x: None + def start_macro(self, input): lambda x: simple_print(x) + def pre_request(self, input): lambda x: simple_print(x) # Called just before submitting requests + def post_request(self, input): lambda x: simple_print(x) # Called just after submitting requests + def end_macro(self, input): lambda x: simple_print(x) # These can be called at any point, theoretically - def log_success(self, input): lambda x: None - def log_failure(self, input): lambda x: None - def log_status(self, input): lambda x: None # Logs status info - def log_intermediate(self, input): lambda x: None # Logs debug results while running + def log_success(self, input): lambda x: simple_print(x) + def log_failure(self, input): lambda x: simple_print(x) + def log_status(self, input): lambda x: simple_print(x) # Logs status info + def log_intermediate(self, input): lambda x: simple_print(x) # Logs debug results while running class TestSetConfig(object): """ Configuration shared across all tests in a testset """ diff --git a/pyresttest/tests.py b/pyresttest/tests.py index 6b45b860..5e384f1e 100644 --- a/pyresttest/tests.py +++ b/pyresttest/tests.py @@ -315,19 +315,19 @@ def execute_macro(self, testset_config=TestSetConfig(), context=None, cmdline_ar result.passed = None if testset_config.interactive: - print("===================================") - print("%s" % mytest.name) - print("-----------------------------------") - print("REQUEST:") - print("%s %s" % (templated_test.method, templated_test.url)) - print("HEADERS:") - print("%s" % (templated_test.headers)) + callbacks.log_status("===================================") + callbacks.log_status("%s" % mytest.name) + callbacks.log_status("-----------------------------------") + callbacks.log_status("REQUEST:") + callbacks.log_status("%s %s" % (templated_test.method, templated_test.url)) + callbacks.log_status("HEADERS:") + callbacks.log_status("%s" % (templated_test.headers)) if mytest.body is not None: - print("\n%s" % templated_test.body) + callbacks.log_status("\n%s" % templated_test.body) raw_input("Press ENTER when ready (%d): " % (mytest.delay)) if mytest.delay > 0: - print("Delaying for %ds" % mytest.delay) + callbacks.log_status("Delaying for %ds" % mytest.delay) time.sleep(mytest.delay) try: @@ -407,13 +407,13 @@ def execute_macro(self, testset_config=TestSetConfig(), context=None, cmdline_ar # (to capture maybe a stack trace) if testset_config.print_bodies or not result.passed: if testset_config.interactive: - print("RESPONSE:") - print(result.body.decode(ESCAPE_DECODING)) + callbacks.log_status("RESPONSE:") + callbacks.log_status(result.body.decode(ESCAPE_DECODING)) if testset_config.print_headers or not result.passed: if testset_config.interactive: - print("RESPONSE HEADERS:") - print(result.response_headers) + callbacks.log_status("RESPONSE HEADERS:") + callbacks.log_status(result.response_headers) # TODO add string escape on body output callbacks.log_intermediate(result) From d48f0ffd17cedf9602095c54743823c0f2b4fe22 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Thu, 10 Mar 2016 21:47:58 -0500 Subject: [PATCH 08/14] Fix Py3 breakage leftover print from testng 
--- pyresttest/test_macros.py | 4 +--- 1 file changed, 1 insertion(+), 3 deletions(-) diff --git a/pyresttest/test_macros.py b/pyresttest/test_macros.py index c5709251..e2bd9a10 100644 --- a/pyresttest/test_macros.py +++ b/pyresttest/test_macros.py @@ -26,9 +26,7 @@ def __init__(self): def list_called_methods(self): """ Return all methods that have been invoked """ - v = filter(lambda x: self.mymocks[x].called == True, self.mymocks.keys()) - print v - return v + return filter(lambda x: self.mymocks[x].called == True, self.mymocks.keys()) class TestMacros(unittest.TestCase): From c72296319e00aa9675a0cf30cc03642c703cff01 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Thu, 10 Mar 2016 21:59:07 -0500 Subject: [PATCH 09/14] Fix import issue with refactors --- pyresttest/resttest.py | 1 - pyresttest/tests.py | 1 + 2 files changed, 1 insertion(+), 1 deletion(-) diff --git a/pyresttest/resttest.py b/pyresttest/resttest.py index ab8426de..5c69a9ba 100644 --- a/pyresttest/resttest.py +++ b/pyresttest/resttest.py @@ -2,7 +2,6 @@ import sys import os import inspect -import traceback import yaml import pycurl import logging diff --git a/pyresttest/tests.py b/pyresttest/tests.py index 5e384f1e..d500938a 100644 --- a/pyresttest/tests.py +++ b/pyresttest/tests.py @@ -2,6 +2,7 @@ import os import copy import json +import traceback import pycurl import sys From c078fc8993d1159853f739265889294e77bc0091 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Thu, 10 Mar 2016 23:16:52 -0500 Subject: [PATCH 10/14] Fix broken logging/callbacks use & another import issue --- pyresttest/macros.py | 1 + pyresttest/resttest.py | 16 +++++++++------- 2 files changed, 10 insertions(+), 7 deletions(-) diff --git a/pyresttest/macros.py b/pyresttest/macros.py index 058b5769..558d525e 100644 --- a/pyresttest/macros.py +++ b/pyresttest/macros.py @@ -6,6 +6,7 @@ # This is all our general execution framework stuff + HTTP request stuff import sys +import json from email import message_from_string # For headers handling from .generators import parse_generator diff --git a/pyresttest/resttest.py b/pyresttest/resttest.py index 5c69a9ba..647a6111 100644 --- a/pyresttest/resttest.py +++ b/pyresttest/resttest.py @@ -168,13 +168,13 @@ def log_failure(failure, context=None, testset_config=TestSetConfig()): class LoggerCallbacks(MacroCallbacks): """ Uses a standard python logger """ def log_status(self, input): - logger.info(input) + logger.info(str(input)) def log_intermediate(self, input): - logger.debug(input) + logger.debug(str(input)) def log_failure(self, input): - logger.error(input) + logger.error(str(input)) def log_success(self, input): - logger.info(input) + logger.info(str(input)) def run_testsets(testsets): """ Execute a set of tests, using given TestSet list input """ @@ -216,7 +216,7 @@ def run_testsets(testsets): group_results[test.group] = list() group_failure_counts[test.group] = 0 - result = test.execute_macro(testset_config=myconfig, context=context, curl_handle=curl_handle) + result = test.execute_macro(callbacks=callbacks, testset_config=myconfig, context=context, curl_handle=curl_handle) result.body = None # Remove the body, save some memory! 
if not result.passed: # Print failure, increase failure counts for that test group @@ -256,7 +256,7 @@ def run_testsets(testsets): logger.info("Benchmark Starting: " + benchmark.name + " Group: " + benchmark.group) - benchmark_result = benchmark.execute_macro(testset_config=myconfig, context=context) + benchmark_result = benchmark.execute_macro(callbacks=callbacks, testset_config=myconfig, context=context) print(benchmark_result) logger.info("Benchmark Done: " + benchmark.name + " Group: " + benchmark.group) @@ -364,7 +364,9 @@ def main(args): if 'log' in args and args['log'] is not None: logger.setLevel(LOGGING_LEVELS.get( - args['log'].lower(), logging.NOTSET)) + args['log'].lower(), logging.INFO)) + else: + logger.setLevel(logging.INFO) if 'import_extensions' in args and args['import_extensions']: extensions = args['import_extensions'].split(';') From d346c8148ef86315cad9b832eee3695e279c89c7 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Fri, 11 Mar 2016 00:21:39 -0500 Subject: [PATCH 11/14] For benchmarks only reconfigure if context has changed --- pyresttest/benchmarks.py | 38 ++++++++++++++++++++++++++++---------- pyresttest/tests.py | 3 +-- 2 files changed, 29 insertions(+), 12 deletions(-) diff --git a/pyresttest/benchmarks.py b/pyresttest/benchmarks.py index de80b7fd..0485e971 100644 --- a/pyresttest/benchmarks.py +++ b/pyresttest/benchmarks.py @@ -231,29 +231,48 @@ def execute_macro(self, context=None, testset_config=TestSetConfig(), cmdline_ar # Benchmark warm-up to allow for caching, JIT compiling, on client callbacks.log_status('Warmup: ' + message + ' started') + + old_mod_count = -1 + templated = benchmark + for x in xrange(0, warmup_runs): benchmark.update_context_before(my_context) - templated = benchmark.realize(my_context) - curl = templated.configure_curl( + + # Reconfigure if context changed + new_mod_count = my_context.mod_count + if new_mod_count != old_mod_count: + templated = benchmark.realize(my_context) + curl = templated.configure_curl( timeout=testset_config.timeout, context=my_context, curl_handle=curl) - # Do not store actual response body at all. - curl.setopt(pycurl.WRITEFUNCTION, lambda x: None) + # Do not store actual response body at all. + curl.setopt(pycurl.WRITEFUNCTION, lambda x: None) + old_mod_count = new_mod_count + + curl.setopt(curl.COOKIELIST, "ALL") curl.perform() callbacks.log_status('Warmup: ' + message + ' finished') callbacks.log_status('Benchmark: ' + message + ' starting') + old_mod_count = -1 for x in xrange(0, benchmark_runs): # Run the actual benchmarks + # Setup benchmark benchmark.update_context_before(my_context) - templated = benchmark.realize(my_context) - curl = templated.configure_curl( - timeout=testset_config.timeout, context=my_context, curl_handle=curl) - # Do not store actual response body at all. - curl.setopt(pycurl.WRITEFUNCTION, lambda x: None) + + # Only reconfigure if mod count changed + new_mod_count = my_context.mod_count + if new_mod_count != old_mod_count: + templated = benchmark.realize(my_context) + curl = templated.configure_curl( + timeout=testset_config.timeout, context=my_context, curl_handle=curl) + # Do not store actual response body at all. 
+ curl.setopt(pycurl.WRITEFUNCTION, lambda x: None) + old_mod_count = new_mod_count try: # Run the curl call, if it errors, then add to failure counts for benchmark + curl.setopt(curl.COOKIELIST, "ALL") curl.perform() except Exception: output.failures = output.failures + 1 @@ -273,7 +292,6 @@ def execute_macro(self, context=None, testset_config=TestSetConfig(), cmdline_ar output.results = temp_results return analyze_benchmark_results(output, benchmark) - def realize_partial(self, context=None): """ Attempt to template out what is possible for this benchmark """ if not self.is_dynamic(): diff --git a/pyresttest/tests.py b/pyresttest/tests.py index d500938a..62916c86 100644 --- a/pyresttest/tests.py +++ b/pyresttest/tests.py @@ -498,6 +498,7 @@ def configure_curl(self, timeout=DEFAULT_TIMEOUT, context=None, curl_handle=None curl.setopt(pycurl.POSTFIELDSIZE, len(bod)) # Template headers as needed and convert headers dictionary to list of header entries + head = self.get_headers(context=context) head = copy.copy(head) # We're going to mutate it, need to copy @@ -549,8 +550,6 @@ def parse_test(cls, base_url, node, input_test=None, test_path=None): # Clean up for easy parsing node = lowercase_keys(flatten_dictionaries(node)) - - # Simple table of variable name, coerce function, and optionally special store function CONFIG_ELEMENTS = { # Simple variables From 0ac1f40d9e1f499aec6405c0f7f53f8ec8c1befa Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Fri, 11 Mar 2016 08:59:07 -0500 Subject: [PATCH 12/14] Fix callbacks mock in macros test harness for different type on instance methods --- pyresttest/test_macros.py | 6 ++++-- 1 file changed, 4 insertions(+), 2 deletions(-) diff --git a/pyresttest/test_macros.py b/pyresttest/test_macros.py index e2bd9a10..2b70ef2b 100644 --- a/pyresttest/test_macros.py +++ b/pyresttest/test_macros.py @@ -17,7 +17,9 @@ class MockingCallbacks(MacroCallbacks): mymocks = None def __init__(self): - origmethods = inspect.getmembers(MacroCallbacks, predicate=inspect.ismethod) + # Creates mocks for all methods, In Py2 they're members, in py3, functions + origmethods = inspect.getmembers(MacroCallbacks, predicate= + lambda x: inspect.ismethod(x) or inspect.isfunction(x)) self.mymocks = dict() for method in origmethods: newmock = mock.MagicMock(name=method[0], return_value=True) @@ -25,7 +27,7 @@ def __init__(self): setattr(self, method[0], newmock) def list_called_methods(self): - """ Return all methods that have been invoked """ + """ Return all methods that have been invoked """ return filter(lambda x: self.mymocks[x].called == True, self.mymocks.keys()) class TestMacros(unittest.TestCase): From 9b04e6adda0b2065170e8421dba3509bcf488e6d Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Tue, 20 Dec 2016 08:55:22 -0500 Subject: [PATCH 13/14] WIP for ongoing work --- pyresttest/macros.py | 13 +++++++++++++ pyresttest/tests.py | 33 +++++++++++++++++++-------------- 2 files changed, 32 insertions(+), 14 deletions(-) diff --git a/pyresttest/macros.py b/pyresttest/macros.py index 558d525e..3a682e6d 100644 --- a/pyresttest/macros.py +++ b/pyresttest/macros.py @@ -92,6 +92,19 @@ class Macro(object): name = u'Unnamed' macro_name = None + def pre_request(self, *args, **kwargs): + """ Work done before request can run + + """ + pass + + def execute_request(self, *args, **kwargs): + pass + + def post_request(self, *args, **kwargs): + """ Work done after request is run """ + pass + def execute_macro(self, testset_config=TestSetConfig(), context=None, cmdline_args=None, 
callbacks=MacroCallbacks(), curl_handle=None, *args, **kwargs): """ Skeletal execution basis """ diff --git a/pyresttest/tests.py b/pyresttest/tests.py index 62916c86..189e3c82 100644 --- a/pyresttest/tests.py +++ b/pyresttest/tests.py @@ -288,21 +288,24 @@ def __str__(self): def execute_macro(self, testset_config=TestSetConfig(), context=None, cmdline_args=None, callbacks=MacroCallbacks(), curl_handle=None, *args, **kwargs): """ Put together test pieces: configure & run actual test, return results """ - # Initialize a context if not supplied + mytest=self + + # Initialize a context if not supplied, and do context updates my_context = context if my_context is None: my_context = Context() - - mytest=self - mytest.update_context_before(my_context) + + + # Pre-run initialization of object, generate executable test objects templated_test = mytest.realize(my_context) - curl = templated_test.configure_curl( - timeout=testset_config.timeout, context=my_context, curl_handle=curl_handle) result = TestResponse() result.test = templated_test + result.passed = None - # reset the body, it holds values from previous runs otherwise + # Request setup + curl = templated_test.configure_curl( + timeout=testset_config.timeout, context=my_context, curl_handle=curl_handle) headers = MyIO() body = MyIO() curl.setopt(pycurl.WRITEFUNCTION, body.write) @@ -311,10 +314,9 @@ def execute_macro(self, testset_config=TestSetConfig(), context=None, cmdline_ar curl.setopt(pycurl.VERBOSE, True) if testset_config.ssl_insecure: curl.setopt(pycurl.SSL_VERIFYPEER, 0) - curl.setopt(pycurl.SSL_VERIFYHOST, 0) - - result.passed = None + curl.setopt(pycurl.SSL_VERIFYHOST, 0) + # Pre-request work, wait for input or add a delay before the request runs if testset_config.interactive: callbacks.log_status("===================================") callbacks.log_status("%s" % mytest.name) @@ -331,6 +333,7 @@ def execute_macro(self, testset_config=TestSetConfig(), context=None, cmdline_ar callbacks.log_status("Delaying for %ds" % mytest.delay) time.sleep(mytest.delay) + # Execute the test, and handle errors try: curl.perform() # Run the actual call except Exception as e: @@ -343,14 +346,16 @@ def execute_macro(self, testset_config=TestSetConfig(), context=None, cmdline_ar curl.close() return result - # Retrieve values + # Post-request work: perform cleanup and gather info from the request as needed + response_code = curl.getinfo(pycurl.RESPONSE_CODE) + result.response_code = response_code result.body = body.getvalue() body.close() result.response_headers = text_type(headers.getvalue(), HEADER_ENCODING) # Per RFC 2616 headers.close() - - response_code = curl.getinfo(pycurl.RESPONSE_CODE) - result.response_code = response_code + + # We are now done with the request, now we can do all the analysis and reporting + # This uses the result object and result bodies + test config callbacks.log_intermediate("Initial Test Result, based on expected response code: " + str(response_code in mytest.expected_status)) From 621ed3888baedaf5a267acfade976464b9b08076 Mon Sep 17 00:00:00 2001 From: Sam Van Oort Date: Mon, 3 Apr 2017 20:43:34 -0400 Subject: [PATCH 14/14] Save WIP --- WIP.py | 42 +++++++++++++++++++ ...miniapp-benchmark-overhead-generators.yaml | 4 +- 2 files changed, 44 insertions(+), 2 deletions(-) create mode 100644 WIP.py diff --git a/WIP.py b/WIP.py new file mode 100644 index 00000000..bac4d652 --- /dev/null +++ b/WIP.py @@ -0,0 +1,42 @@ +# Sample API for all execution shit: + +Reportable(object): + + + def update_context_before(): + pass + def 
update_context_after(self, context):
+        pass
+
+    def setup(self, context, *args, **kwargs):
+        print("")
+
+    # Execution object might be a curl handle, a test/benchmark, whatever
+    # For benchmarks, an execution object is a sub-reportable with lifecycle for each request
+
+    def real_execution(self):
+        self.update_context()
+        self.apply_templating() # Currently what we use to generate a templated object
+        self.initialize_report()
+        self.initialize_execution(self, execution_object, templated_result)
+        self.pre_execution(self, execution_object) # for delays, etc
+        self.run_execution(self, execution_object)
+        self.post_execution(self, execution_object) # for post-request cleanup
+        self.analyze_execution(self, execution_object, report_object)
+        self.analyze_report(self, report_object)
+        self.update_context_after()
+
+
+
+REPORTABLE METHOD:
+    runme:
+    - update_context_before
+    - initialize_reporting/states -> return obj for report
+
+    - create_execution(self, options) -> returns object specific to impl
+    - run_execution -> curl.perform or benchmark loop
+    - analyze_execution -> does collection of data from execution
+
+    - update_context_after(request, execution_info)
+    - generate_report/
+
diff --git a/examples/miniapp-benchmark-overhead-generators.yaml b/examples/miniapp-benchmark-overhead-generators.yaml
index a386b6af..90233b1f 100644
--- a/examples/miniapp-benchmark-overhead-generators.yaml
+++ b/examples/miniapp-benchmark-overhead-generators.yaml
@@ -14,8 +14,8 @@
     - method: 'PUT'
     - headers: {'Content-Type': 'application/json'}
     - body: {template: '{"first_name": "Gaius","id": "$id","last_name": "Baltar","login": "$id"}'}
-  - 'benchmark_runs': '1000'
+  - 'benchmark_runs': '10000'
   - output_format: csv
   - metrics:
     - total_time: total
-    - total_time: mean
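
The lifecycle sketched in WIP.py above extends the skeletal Macro.execute_macro introduced earlier in this series, which already walks a fixed callback sequence (start_macro, pre_request, post_request, log_success/log_failure, end_macro). The standalone Python sketch below illustrates only that callback flow; the class names PrintingCallbacks and SketchMacro are placeholders for illustration and are not part of the pyresttest modules changed in these patches.

class PrintingCallbacks(object):
    """ Callbacks bundle that just prints lifecycle events, mirroring the MacroCallbacks hooks """

    def start_macro(self, message):
        print("START: %s" % message)

    def pre_request(self, message):
        print("PRE:   %s" % message)

    def post_request(self, message):
        print("POST:  %s" % message)

    def log_success(self, message):
        print("OK:    %s" % message)

    def log_failure(self, message):
        print("FAIL:  %s" % message)

    def end_macro(self, message):
        print("END:   %s" % message)


class SketchMacro(object):
    """ Minimal macro: real subclasses template a request, run it, and analyze the result """
    name = u'Unnamed'

    def execute_macro(self, callbacks=None):
        # Fire the lifecycle hooks in the same order as the skeletal Macro.execute_macro
        callbacks = callbacks or PrintingCallbacks()
        callbacks.start_macro(self.name)
        callbacks.pre_request('Pre-request: no request to run')
        callbacks.post_request('Post-request: no request to run')
        callbacks.log_success('Empty macro always succeeds')
        callbacks.end_macro(self.name)


if __name__ == '__main__':
    macro = SketchMacro()
    macro.name = u'sample-macro'
    macro.execute_macro()

Because the hooks live on a callbacks object, resttest.py can pass a logger-backed implementation (LoggerCallbacks) while the unit tests pass a mock-backed one (MockingCallbacks), without Test or Benchmark importing any logging machinery themselves.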