tunnel changes from developer to developer msft
Signed-off-by: v-ajitpanda <[email protected]>
v-ajitpanda committed Mar 14, 2024
2 parents 97f34f0 + 3cfeb12 commit 7b7a623
Showing 17 changed files with 337 additions and 452 deletions.
4 changes: 0 additions & 4 deletions Dockerfile
@@ -15,10 +15,6 @@ RUN apt-get update

RUN apt-get install python3-pip zlib1g-dev scons -y

-ARG LOCAL_DIR
-
-ENV LOCAL_DIR_ENV=$LOCAL_DIR
-
WORKDIR /app

COPY . /app
2 changes: 1 addition & 1 deletion Makefile
@@ -9,7 +9,7 @@ DOCKER_IMAGE_TAG=pyinstaller-ctam
DOCKER_CONTAINER_NAME=build-ctam

build_image:
-docker build --tag $(DOCKER_IMAGE_TAG) --build-arg LOCAL_DIR=$(PWD) .
+docker build --tag $(DOCKER_IMAGE_TAG) .
docker run -it --name $(DOCKER_CONTAINER_NAME) $(DOCKER_IMAGE_TAG)
docker cp $(DOCKER_CONTAINER_NAME):/app/dist ./dist
docker rm -f $(DOCKER_CONTAINER_NAME)
8 changes: 1 addition & 7 deletions build_scripts/build_script.sh
@@ -3,14 +3,8 @@
# LICENSE file in the root directory of this source tree.
#

-# get current directory
-export current_dir="$LOCAL_DIR_ENV"
-
-# path to binary
-dist_dir="${current_dir}/dist"
-
# pyinstaller command to make binary
-pyinstaller_cmd="pyinstaller --add-data=/app/ctam:. --name ctam.build --paths=/app/ctam --onefile ctam/ctam.py --runtime-tmpdir ${dist_dir}"
+pyinstaller_cmd="pyinstaller --add-data=/app/ctam:. --name ctam.build --paths=/app/ctam --onefile ctam/ctam.py --workpath /tmp --distpath dist --exclude-module sqlite3"

# static to make one executable
staticx_cmd="staticx ./dist/ctam.build ./dist/ctam"
1 change: 0 additions & 1 deletion ctam/__init__.py
@@ -1 +0,0 @@
-__version__ = "0.0.1"
285 changes: 142 additions & 143 deletions ctam/ctam.py
@@ -61,7 +61,7 @@ def parse_args():
parser.add_argument(
"-w",
"--workspace",
required=not any(arg in sys.argv for arg in ["-l", "--list", "-v", "--version"]),
required=not any(arg in sys.argv for arg in ["-v", "--version"]),
help="Path to workspace directory that contains test run files",
)

@@ -78,7 +78,6 @@ def parse_args():
help="Display current version of ctam",
action="store_true",
)

return parser.parse_args()

def get_exception_details(exec: Exception = ""):
@@ -93,6 +92,7 @@ def get_exception_details(exec: Exception = ""):
"""
exc_type, exc_obj, exc_tb = sys.exc_info()
temp = exc_tb

traceback_details = {}
while temp:
f_name = os.path.split(temp.tb_frame.f_code.co_filename)[1]
@@ -117,148 +117,147 @@ def get_exception_details(exec: Exception = ""):

def main():
args = parse_args()
try:
# builds hierarchy of test groups and associated test cases
#ms_internal_tests

# if args.list:
# test_hierarchy.print_test_groups_test_cases(args.group)
# return 0, None, "List of tests is printed"

if args.version:
print(f"CTAM - version {__version__}")
exit()

if not os.path.isdir(args.workspace):
print("Invalid workspace specified")
return 1, None, "Invalid workspace specified"

required_workspace_files = [
"dut_info.json",
"redfish_uri_config.json",
".netrc",
]

missing_files = [
file for file in required_workspace_files if not os.path.isfile(os.path.join(args.workspace, file))
]

if missing_files:
for file_name in missing_files:
print(f"The required file {file_name} does not exist in the workspace.")
return 1, None, "Missing required files"
print(f"Version : {__version__}")
print(f"WorkSpace : {args.workspace}")
test_runner_json = os.path.join(args.workspace, "test_runner.json")
dut_info_json = os.path.join(args.workspace, "dut_info.json")
package_info_json = os.path.join(args.workspace, "package_info.json")
redfish_uri_config = os.path.join(args.workspace, "redfish_uri_config.json")
net_rc = os.path.join(args.workspace, ".netrc")

# NOTE: We have added internal test directory as mandatory if 'internal_testing' is true in test runner json.
# NOTE: If internal_test is true in test runner json then both internal and external tests we can run, else we can continue our existing flow.
with open(test_runner_json, "r") as f:
test_runner_config = json.load(f)

internal_testing = test_runner_config.get("internal_testing", False)

ifc_dir = os.path.join(os.path.dirname(__file__), "interfaces")
ext_test_root_dir = os.path.join(os.path.dirname(__file__), "tests")

if internal_testing:
int_test_root_dir = os.path.join(os.path.dirname(__file__), "internal_tests")
test_root_dir = [ext_test_root_dir, int_test_root_dir]
test_hierarchy = TestHierarchy(test_root_dir, ifc_dir)
else:
test_hierarchy = TestHierarchy(ext_test_root_dir, ifc_dir)

if args.list:
test_hierarchy.print_test_groups_test_cases(args.group)
return 0, None, "List of tests is printed"

if args.Discovery:
runner = TestRunner(
workspace_dir=args.workspace,
test_hierarchy=test_hierarchy,
test_runner_json_file=test_runner_json,
dut_info_json_file=dut_info_json,
package_info_json_file=package_info_json,
redfish_uri_config_file=redfish_uri_config,
net_rc=net_rc,
)
status_code, exit_string = runner.get_system_details()
return status_code, None, exit_string

elif args.testcase:
runner = TestRunner(
workspace_dir=args.workspace,
test_hierarchy=test_hierarchy,
test_runner_json_file=test_runner_json,
dut_info_json_file=dut_info_json,
package_info_json_file=package_info_json,
redfish_uri_config_file=redfish_uri_config,
net_rc=net_rc,
single_test_override=args.testcase,
)
elif args.testcase_sequence:
runner = TestRunner(
workspace_dir=args.workspace,
test_hierarchy=test_hierarchy,
test_runner_json_file=test_runner_json,
dut_info_json_file=dut_info_json,
package_info_json_file=package_info_json,
redfish_uri_config_file=redfish_uri_config,
net_rc=net_rc,
sequence_test_override=args.testcase_sequence,
)
elif args.group:
runner = TestRunner(
workspace_dir=args.workspace,
test_hierarchy=test_hierarchy,
test_runner_json_file=test_runner_json,
dut_info_json_file=dut_info_json,
package_info_json_file=package_info_json,
redfish_uri_config_file=redfish_uri_config,
net_rc=net_rc,
single_group_override=args.group,
)
elif args.group_sequence:
runner = TestRunner(
workspace_dir=args.workspace,
test_hierarchy=test_hierarchy,
test_runner_json_file=test_runner_json,
dut_info_json_file=dut_info_json,
package_info_json_file=package_info_json,
redfish_uri_config_file=redfish_uri_config,
net_rc=net_rc,
sequence_group_override=args.group_sequence,
)
else:
all_tests = test_hierarchy.get_all_tests()
runner = TestRunner(
workspace_dir=args.workspace,
test_hierarchy=test_hierarchy,
test_runner_json_file=test_runner_json,
dut_info_json_file=dut_info_json,
package_info_json_file=package_info_json,
net_rc=net_rc,
redfish_uri_config_file=redfish_uri_config,
run_all_tests=all_tests
)

status_code, exit_string = runner.run()
log_directory = os.path.relpath(runner.output_dir, os.getcwd())
return status_code, log_directory, exit_string

except (Exception, NotImplementedError) as e:
exception_details = get_exception_details(e)
print(f"Test Run Failed: {json.dumps(exception_details, indent=4)}")
return 1, None, f"Test failed due to exception: {e}"


# try:
# builds hierarchy of test groups and associated test cases
#ms_internal_tests

if args.version:
print(f"CTAM - version {__version__}")
exit()

if not os.path.isdir(args.workspace):
print("Invalid workspace specified")
return 1, None, "Invalid workspace specified"

required_workspace_files = [
"dut_info.json",
"redfish_uri_config.json",
".netrc",
]

missing_files = [
file_name for file_name in required_workspace_files if not os.path.isfile(os.path.join(args.workspace, file_name))
]

if missing_files:
for file_name in missing_files:
print(f"The required file {file_name} does not exist in the workspace.")
return 1, None, "Missing required files"
print(f"Version : {__version__}")
print(f"WorkSpace : {args.workspace}")
test_runner_json = os.path.join(args.workspace, "test_runner.json")
dut_info_json = os.path.join(args.workspace, "dut_info.json")
package_info_json = os.path.join(args.workspace, "package_info.json")
redfish_uri_config = os.path.join(args.workspace, "redfish_uri_config.json")
net_rc = os.path.join(args.workspace, ".netrc")

# NOTE: We have added internal test directory as mandatory if 'internal_testing' is true in test runner json.
# NOTE: If internal_test is true in test runner json then both internal and external tests we can run, else we can continue our existing flow.
with open(test_runner_json, "r") as f:
test_runner_config = json.load(f)

internal_testing = test_runner_config.get("internal_testing", False)

test_ifc_root_dir = test_runner_config.get("test_ifc_override_dir", os.path.dirname(__file__))

ifc_dir = os.path.join(test_ifc_root_dir, "interfaces")
ext_test_root_dir = os.path.join(test_ifc_root_dir, "tests")

if internal_testing:
int_test_root_dir = os.path.join(test_ifc_root_dir, "internal_tests")
test_root_dir = [ext_test_root_dir, int_test_root_dir]
test_hierarchy = TestHierarchy(test_root_dir, ifc_dir)
else:
test_hierarchy = TestHierarchy(ext_test_root_dir, ifc_dir)

if args.list:
test_hierarchy.print_test_groups_test_cases(args.group)
return 0, None, "List of tests is printed"

if args.Discovery:
runner = TestRunner(
workspace_dir=args.workspace,
test_hierarchy=test_hierarchy,
test_runner_json_file=test_runner_json,
dut_info_json_file=dut_info_json,
package_info_json_file=package_info_json,
redfish_uri_config_file=redfish_uri_config,
net_rc=net_rc,
)
status_code, exit_string = runner.get_system_details()
return status_code, None, exit_string

elif args.testcase:
runner = TestRunner(
workspace_dir=args.workspace,
test_hierarchy=test_hierarchy,
test_runner_json_file=test_runner_json,
dut_info_json_file=dut_info_json,
package_info_json_file=package_info_json,
redfish_uri_config_file=redfish_uri_config,
net_rc=net_rc,
single_test_override=args.testcase,
)
elif args.testcase_sequence:
runner = TestRunner(
workspace_dir=args.workspace,
test_hierarchy=test_hierarchy,
test_runner_json_file=test_runner_json,
dut_info_json_file=dut_info_json,
package_info_json_file=package_info_json,
redfish_uri_config_file=redfish_uri_config,
net_rc=net_rc,
sequence_test_override=args.testcase_sequence,
)
elif args.group:
runner = TestRunner(
workspace_dir=args.workspace,
test_hierarchy=test_hierarchy,
test_runner_json_file=test_runner_json,
dut_info_json_file=dut_info_json,
package_info_json_file=package_info_json,
redfish_uri_config_file=redfish_uri_config,
net_rc=net_rc,
single_group_override=args.group,
)
elif args.group_sequence:
runner = TestRunner(
workspace_dir=args.workspace,
test_hierarchy=test_hierarchy,
test_runner_json_file=test_runner_json,
dut_info_json_file=dut_info_json,
package_info_json_file=package_info_json,
redfish_uri_config_file=redfish_uri_config,
net_rc=net_rc,
sequence_group_override=args.group_sequence,
)
else:
all_tests = test_hierarchy.get_all_tests()
runner = TestRunner(
workspace_dir=args.workspace,
test_hierarchy=test_hierarchy,
test_runner_json_file=test_runner_json,
dut_info_json_file=dut_info_json,
package_info_json_file=package_info_json,
net_rc=net_rc,
redfish_uri_config_file=redfish_uri_config,
run_all_tests=all_tests
)

status_code, exit_string = runner.run()
log_directory = os.path.relpath(runner.output_dir, os.getcwd())
return status_code, log_directory, exit_string

# except (Exception, NotImplementedError) as e:
# exception_details = get_exception_details(e)
# print(f"Test Run Failed: {json.dumps(exception_details, indent=4)}")
# return 1, None, f"Test failed due to exception: {e}"


if __name__ == "__main__":
status_code, log_directory, exit_string = main()
print("\nTest exited with status code*: {} - {}".format("FAIL" if status_code else "PASS", exit_string))
print(f"Log Directory: {log_directory}")
print(f"Log Directory: {log_directory}")
print("\n*Note: Return/Status Codes - PASS(0): All tests passed, FAIL(1): Execution/runtime failure or test failure\n")
exit(status_code)
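
For context, a minimal sketch of how the test_runner.json keys read in the ctam.py hunk above are consumed. The key names (internal_testing, test_ifc_override_dir) and their .get() defaults come straight from this diff; the workspace path and the surrounding wiring are illustrative assumptions, not part of the commit.

# Illustrative sketch only (not part of this commit); mirrors the config handling added to ctam.py.
import json
import os

workspace = "my_workspace"  # assumption: placeholder for the directory passed via -w/--workspace
with open(os.path.join(workspace, "test_runner.json"), "r") as f:
    test_runner_config = json.load(f)

# Defaults match the diff: internal testing off, interfaces/tests resolved next to the running script.
internal_testing = test_runner_config.get("internal_testing", False)
test_ifc_root_dir = test_runner_config.get("test_ifc_override_dir", os.path.dirname(__file__))

ifc_dir = os.path.join(test_ifc_root_dir, "interfaces")
test_root_dirs = [os.path.join(test_ifc_root_dir, "tests")]
if internal_testing:
    # When internal_testing is true, internal_tests is discovered alongside the external tests.
    test_root_dirs.append(os.path.join(test_ifc_root_dir, "internal_tests"))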
