From baf8c39a8d4d690ba0306ff555394a6a476e1e51 Mon Sep 17 00:00:00 2001
From: Edward Chen <18449977+edgchen1@users.noreply.github.com>
Date: Mon, 9 Aug 2021 10:37:05 -0700
Subject: [PATCH] Add Python checks pipeline (#7032)

This change adds a new CI pipeline for checking Python code. Currently, the
pipeline only runs flake8. flake8 also runs as part of the CMake project
builds; we can switch over completely to the new pipeline later.

The .flake8 config file was also updated to make it easier to run flake8
standalone (flake8 --config ./.flake8), and some Python formatting issues
were fixed in files that were not previously scanned.
---
 .flake8                                       | 23 ++++-
 .../generate_submodule_cgmanifest.py          | 11 ++-
 csharp/testdata/test_input_BFLOAT16.py        | 13 ++-
 csharp/testdata/test_input_FLOAT16.py         | 18 ++--
 .../training/orttrainer/mnist/ort_mnist.py    |  5 +-
 .../orttrainer/mnist/pytorch_mnist.py         |  2 +
 .../pytorch_transformer/ort_train.py          |  1 -
 .../pytorch_transformer/ort_utils.py          |  8 +-
 .../pytorch_transformer/pt_model.py           |  1 -
 .../pytorch_transformer/pt_train.py           |  2 -
 .../orttrainer/pytorch_transformer/utils.py   | 11 ++-
 setup.py                                      | 95 ++++++++++---------
 .../python-checks-ci-pipeline.yml             | 19 ++++
 tools/ci_build/github/python_checks/readme.md | 18 ++++
 .../github/python_checks/requirements.txt     |  1 +
 15 files changed, 146 insertions(+), 82 deletions(-)
 create mode 100644 tools/ci_build/github/azure-pipelines/python-checks-ci-pipeline.yml
 create mode 100644 tools/ci_build/github/python_checks/readme.md
 create mode 100644 tools/ci_build/github/python_checks/requirements.txt

diff --git a/.flake8 b/.flake8
index ba27738f476fb..16af86b67baf5 100644
--- a/.flake8
+++ b/.flake8
@@ -3,7 +3,22 @@ max-line-length = 120
 per-file-ignores =
     __init__.py:F401
 format = [flake8 PEP8 ERROR] %(path)s:%(row)d:%(col)d: %(code)s %(text)s
-# We generally exclude using cmake/flake8.cmake. If something needs to be excluded here
-# The exclude value/s need to be on a newline otherwise it doesn't work (at least on Windows)
-# exclude =
-#   ./onnxruntime/core/flatbuffers/ort_flatbuffers_py
+exclude =
+    # ignore default build directory
+    ./build,
+    # ignore external dependency files
+    ./cmake/external,
+    # TODO enable
+    ./docs/python,
+    # ignore generated flatbuffers code
+    ./onnxruntime/core/flatbuffers/ort_flatbuffers_py,
+    # TODO enable
+    ./onnxruntime/core/providers/nuphar,
+    # TODO enable
+    ./onnxruntime/python/tools,
+    # ignore test code for now
+    ./onnxruntime/test,
+    # TODO enable
+    ./orttraining,
+    # ignore server code for now
+    ./server,
diff --git a/cgmanifests/submodules/generate_submodule_cgmanifest.py b/cgmanifests/submodules/generate_submodule_cgmanifest.py
index af8ee838404a2..df6d5f0df6e80 100644
--- a/cgmanifests/submodules/generate_submodule_cgmanifest.py
+++ b/cgmanifests/submodules/generate_submodule_cgmanifest.py
@@ -15,24 +15,25 @@
 registrations = []
 
-with open(os.path.join(REPO_DIR, 'tools', 'ci_build', 'github', 'linux', 'docker', 'Dockerfile.manylinux2014_cuda11'), "r") as f:
+with open(os.path.join(REPO_DIR, 'tools', 'ci_build', 'github', 'linux', 'docker', 'Dockerfile.manylinux2014_cuda11'),
+          "r") as f:
     for line in f:
         if not line.strip():
             package_name = None
            package_filename = None
             package_url = None
         if package_filename is None:
-            m = re.match("RUN\s+export\s+(.+?)_ROOT=(\S+).*", line)
+            m = re.match(r"RUN\s+export\s+(.+?)_ROOT=(\S+).*", line)
             if m is not None:
                 package_name = m.group(1)
                 package_filename = m.group(2)
             else:
-                m = re.match("RUN\s+export\s+(.+?)_VERSION=(\S+).*", line)
+                m = re.match(r"RUN\s+export\s+(.+?)_VERSION=(\S+).*", line)
                 if m is not None:
                     package_name = m.group(1)
                     package_filename = m.group(2)
         elif package_url is None:
-            m = re.match("(.+?)_DOWNLOAD_URL=(\S+)", line)
+            m = re.match(r"(.+?)_DOWNLOAD_URL=(\S+)", line)
             if m is not None:
                 package_url = m.group(2)
                 if package_name == 'LIBXCRYPT':
@@ -60,9 +61,11 @@
         package_filename = None
         package_url = None
 
+
 def normalize_path_separators(path):
     return path.replace(os.path.sep, "/")
 
+
 proc = subprocess.run(
     ["git", "submodule", "foreach", "--quiet", "--recursive", "{} {} $toplevel/$sm_path".format(
         normalize_path_separators(sys.executable),
diff --git a/csharp/testdata/test_input_BFLOAT16.py b/csharp/testdata/test_input_BFLOAT16.py
index b10636292eb0b..862b57d10f1c4 100644
--- a/csharp/testdata/test_input_BFLOAT16.py
+++ b/csharp/testdata/test_input_BFLOAT16.py
@@ -4,27 +4,26 @@
 import onnx
 from onnx import helper
 from onnx.helper import make_opsetid
-from onnx import AttributeProto, TensorProto, GraphProto
+from onnx import TensorProto
 
 input_info = helper.make_tensor_value_info('input', TensorProto.BFLOAT16, [1, 5])
 output_info = helper.make_tensor_value_info('output', TensorProto.BFLOAT16, [1, 5])
 
 # Create a node (NodeProto) - This is based on Pad-11
 node_def = helper.make_node(
-    'Identity', # node name
-    ['input'], # inputs
-    ['output'] # outputs
+    'Identity',  # node name
+    ['input'],  # inputs
+    ['output']  # outputs
 )
 
 graph_def = helper.make_graph(nodes=[node_def], name='test_types_BLOAT16',
-    inputs=[input_info], outputs=[output_info])
+                              inputs=[input_info], outputs=[output_info])
 
 model_def = helper.make_model(graph_def, producer_name='AIInfra',
-    opset_imports=[make_opsetid('', 13)])
+                              opset_imports=[make_opsetid('', 13)])
 
 onnx.checker.check_model(model_def)
 onnx.helper.strip_doc_string(model_def)
 final_model = onnx.shape_inference.infer_shapes(model_def)
 onnx.checker.check_model(final_model)
 onnx.save(final_model, 'test_types_BFLOAT16.onnx')
-
diff --git a/csharp/testdata/test_input_FLOAT16.py b/csharp/testdata/test_input_FLOAT16.py
index e77406cc718ef..c787cf1c06e6f 100644
--- a/csharp/testdata/test_input_FLOAT16.py
+++ b/csharp/testdata/test_input_FLOAT16.py
@@ -4,26 +4,26 @@
 import onnx
 from onnx import helper
 from onnx.helper import make_opsetid
-from onnx import AttributeProto, TensorProto, GraphProto
+from onnx import TensorProto
 
 input_info = helper.make_tensor_value_info('input', TensorProto.FLOAT16, [1, 5])
 output_info = helper.make_tensor_value_info('output', TensorProto.FLOAT16, [1, 5])
 
 # Create a node (NodeProto) - This is based on Pad-11
 node_def = helper.make_node(
-    'Slice', # node name
-    ['input'], # inputs
-    ['output'], # outputs
-    axes=[0,1], # attributes
-    ends=[1,5],
-    starts=[0,0]
+    'Slice',  # node name
+    ['input'],  # inputs
+    ['output'],  # outputs
+    axes=[0, 1],  # attributes
+    ends=[1, 5],
+    starts=[0, 0]
 )
 
 graph_def = helper.make_graph(nodes=[node_def], name='test_input_FLOAT16',
-    inputs=[input_info], outputs=[output_info])
+                              inputs=[input_info], outputs=[output_info])
 
 model_def = helper.make_model(graph_def, producer_name='AIInfra',
-    opset_imports=[make_opsetid('', 7)])
+                              opset_imports=[make_opsetid('', 7)])
 
 onnx.checker.check_model(model_def)
 onnx.helper.strip_doc_string(model_def)
diff --git a/samples/python/training/orttrainer/mnist/ort_mnist.py b/samples/python/training/orttrainer/mnist/ort_mnist.py
index f2340e4395335..14cfc42351dcd 100644
--- a/samples/python/training/orttrainer/mnist/ort_mnist.py
+++ b/samples/python/training/orttrainer/mnist/ort_mnist.py
@@ -9,7 +9,7 @@
 from torchvision import datasets, transforms
 
 import onnxruntime
-from onnxruntime.training import ORTTrainer, ORTTrainerOptions, optim, checkpoint
+from onnxruntime.training import ORTTrainer, ORTTrainerOptions, optim
 
 
 # Pytorch model
@@ -34,9 +34,11 @@ def mnist_model_description():
             'outputs': [('loss', [], True), ('probability', ['batch', 10])]}
 
+
 def my_loss(x, target):
     return F.nll_loss(F.log_softmax(x, dim=1), target)
 
+
 # Helpers
 def train(log_interval, trainer, device, train_loader, epoch, train_steps):
     for batch_idx, (data, target) in enumerate(train_loader):
@@ -151,5 +153,6 @@ def main():
     if args.save_path:
         torch.save(model.state_dict(), os.path.join(args.save_path, "mnist_cnn.pt"))
 
+
 if __name__ == '__main__':
     main()
diff --git a/samples/python/training/orttrainer/mnist/pytorch_mnist.py b/samples/python/training/orttrainer/mnist/pytorch_mnist.py
index f6cdb8be3f18d..6e52a80dd2bd0 100644
--- a/samples/python/training/orttrainer/mnist/pytorch_mnist.py
+++ b/samples/python/training/orttrainer/mnist/pytorch_mnist.py
@@ -28,6 +28,7 @@ def my_loss(x, target, is_train=True):
     else:
         return F.nll_loss(F.log_softmax(x, dim=1), target, reduction='sum')
 
+
 # Helpers
 def train(args, model, device, train_loader, optimizer, epoch):
     model.train()
@@ -127,5 +128,6 @@ def main():
     if args.save_path:
         torch.save(model.state_dict(), os.path.join(args.save_path, "mnist_cnn.pt"))
 
+
 if __name__ == '__main__':
     main()
diff --git a/samples/python/training/orttrainer/pytorch_transformer/ort_train.py b/samples/python/training/orttrainer/pytorch_transformer/ort_train.py
index 98301067992ec..baf2d19b205e4 100644
--- a/samples/python/training/orttrainer/pytorch_transformer/ort_train.py
+++ b/samples/python/training/orttrainer/pytorch_transformer/ort_train.py
@@ -1,5 +1,4 @@
 import argparse
-import math
 
 import torch
 import onnxruntime
diff --git
a/samples/python/training/orttrainer/pytorch_transformer/ort_utils.py b/samples/python/training/orttrainer/pytorch_transformer/ort_utils.py
index c97f9b4396b95..61c419964333d 100644
--- a/samples/python/training/orttrainer/pytorch_transformer/ort_utils.py
+++ b/samples/python/training/orttrainer/pytorch_transformer/ort_utils.py
@@ -30,8 +30,8 @@ def legacy_transformer_model_description(bptt=35, batch_size=20, ntokens=28785):
     label_desc = Legacy_IODescription('label', [bptt * batch_size])
     loss_desc = Legacy_IODescription('loss', [])
     predictions_desc = Legacy_IODescription('predictions', [bptt, batch_size, ntokens])
-    return Legacy_ModelDescription([input_desc, label_desc],[loss_desc, predictions_desc]),\
-        Legacy_IODescription('__learning_rate', [1])
+    return (Legacy_ModelDescription([input_desc, label_desc], [loss_desc, predictions_desc]),
+            Legacy_IODescription('__learning_rate', [1]))
 
 
 def legacy_transformer_model_description_dynamic_axes(ntokens=28785):
@@ -39,5 +39,5 @@ def legacy_transformer_model_description_dynamic_axes(ntokens=28785):
     label_desc = Legacy_IODescription('label', ['bptt_x_batch_size'])
     loss_desc = Legacy_IODescription('loss', [])
     predictions_desc = Legacy_IODescription('predictions', ['bptt', 'batch_size', ntokens])
-    return Legacy_ModelDescription([input_desc, label_desc],[loss_desc, predictions_desc]),\
-        Legacy_IODescription('__learning_rate', [1])
+    return (Legacy_ModelDescription([input_desc, label_desc], [loss_desc, predictions_desc]),
+            Legacy_IODescription('__learning_rate', [1]))
diff --git a/samples/python/training/orttrainer/pytorch_transformer/pt_model.py b/samples/python/training/orttrainer/pytorch_transformer/pt_model.py
index 63a6c3fbd465a..87a938cd4b757 100644
--- a/samples/python/training/orttrainer/pytorch_transformer/pt_model.py
+++ b/samples/python/training/orttrainer/pytorch_transformer/pt_model.py
@@ -1,7 +1,6 @@
 import math
 import torch
 import torch.nn as nn
-import torch.nn.functional as F
 
 
 class TransformerModel(nn.Module):
diff --git a/samples/python/training/orttrainer/pytorch_transformer/pt_train.py b/samples/python/training/orttrainer/pytorch_transformer/pt_train.py
index 7d3e8851c9e84..68c18842d4931 100644
--- a/samples/python/training/orttrainer/pytorch_transformer/pt_train.py
+++ b/samples/python/training/orttrainer/pytorch_transformer/pt_train.py
@@ -1,8 +1,6 @@
 import argparse
-import math
 import torch
 import torch.nn as nn
-import torch.nn.functional as F
 
 from utils import prepare_data, get_batch
 from pt_model import TransformerModel
diff --git a/samples/python/training/orttrainer/pytorch_transformer/utils.py b/samples/python/training/orttrainer/pytorch_transformer/utils.py
index 7177839efec73..0d35fa98035a4 100644
--- a/samples/python/training/orttrainer/pytorch_transformer/utils.py
+++ b/samples/python/training/orttrainer/pytorch_transformer/utils.py
@@ -1,11 +1,11 @@
 import io
 import os
 import torch
-import torchtext
 from torchtext.utils import download_from_url, extract_archive
 from torchtext.data.utils import get_tokenizer
 from torchtext.vocab import build_vocab_from_iterator
 
+
 def batchify(data, bsz, device):
     # Divide the dataset into bsz parts.
     nbatch = data.size(0) // bsz
@@ -36,15 +36,16 @@ def prepare_data(device='cpu', train_batch_size=20, eval_batch_size=20, data_dir
     extract_path = os.path.join(data_dir, 'extracted')
     os.makedirs(extract_path, exist_ok=True)
 
-    test_filepath, valid_filepath, train_filepath = extract_archive(download_from_url(url, root=download_path), to_path=extract_path)
+    test_filepath, valid_filepath, train_filepath = extract_archive(download_from_url(url, root=download_path),
+                                                                    to_path=extract_path)
     tokenizer = get_tokenizer('basic_english')
     vocab = build_vocab_from_iterator(map(tokenizer,
-                                          iter(io.open(train_filepath,
-                                                       encoding="utf8"))))
+                                      iter(io.open(train_filepath,
+                                                   encoding="utf8"))))
 
     def data_process(raw_text_iter):
         data = [torch.tensor([vocab[token] for token in tokenizer(item)],
-                dtype=torch.long) for item in raw_text_iter]
+                             dtype=torch.long) for item in raw_text_iter]
         return torch.cat(tuple(filter(lambda t: t.numel() > 0, data)))
 
     train_data = data_process(iter(io.open(train_filepath, encoding="utf8")))
diff --git a/setup.py b/setup.py
index d0ab2d595ea49..1d6246f63fbb6 100644
--- a/setup.py
+++ b/setup.py
@@ -1,7 +1,7 @@
-# -------------------------------------------------------------------------
+# ------------------------------------------------------------------------
 # Copyright (c) Microsoft Corporation. All rights reserved.
 # Licensed under the MIT License.
-# --------------------------------------------------------------------------
+# ------------------------------------------------------------------------
 
 from setuptools import setup, Extension
 from distutils import log as logger
@@ -55,7 +55,7 @@ def parse_arg_remove_string(argv, arg_name_equal):
 if parse_arg_remove_boolean(sys.argv, '--use_tensorrt'):
     package_name = 'onnxruntime-gpu-tensorrt' if not nightly_build else 'ort-trt-nightly'
 elif wheel_name_suffix == 'gpu':
-    #TODO: how to support multiple CUDA versions?
+    # TODO: how to support multiple CUDA versions?
     cuda_version = parse_arg_remove_string(sys.argv, '--cuda_version=')
 elif parse_arg_remove_boolean(sys.argv, '--use_rocm'):
     package_name = 'onnxruntime-rocm' if not nightly_build else 'ort-rocm-nightly'
@@ -99,6 +99,7 @@ def parse_arg_remove_string(argv, arg_name_equal):
 ]
 
 is_manylinux = environ.get('AUDITWHEEL_PLAT', None) in manylinux_tags
 
+
 class build_ext(_build_ext):
     def build_extension(self, ext):
         dest_file = self.get_ext_fullpath(ext.name)
@@ -135,8 +136,10 @@ def run(self):
                 dest = 'onnxruntime/capi/onnxruntime_pybind11_state_manylinux1.so'
                 logger.info('copying %s -> %s', source, dest)
                 copyfile(source, dest)
-                result = subprocess.run(['patchelf', '--print-needed', dest], check=True, stdout=subprocess.PIPE, universal_newlines=True)
-                dependencies = ['librccl.so', 'libamdhip64.so', 'librocblas.so', 'libMIOpen.so', 'libhsa-runtime64.so', 'libhsakmt.so']
+                result = subprocess.run(['patchelf', '--print-needed', dest],
+                                        check=True, stdout=subprocess.PIPE, universal_newlines=True)
+                dependencies = ['librccl.so', 'libamdhip64.so', 'librocblas.so', 'libMIOpen.so',
+                                'libhsa-runtime64.so', 'libhsakmt.so']
                 to_preload = []
                 args = ['patchelf', '--debug']
                 for line in result.stdout.split('\n'):
@@ -150,13 +153,15 @@ def run(self):
 
             dest = 'onnxruntime/capi/libonnxruntime_providers_cuda.so'
             if path.isfile(dest):
-                result = subprocess.run(['patchelf', '--print-needed', dest], check=True, stdout=subprocess.PIPE, universal_newlines=True)
-                cuda_dependencies = ['libcublas.so', 'libcublasLt.so', 'libcudnn.so', 'libcudart.so', 'libcurand.so', 'libcufft.so', 'libnvToolsExt.so']
+                result = subprocess.run(['patchelf', '--print-needed', dest],
+                                        check=True, stdout=subprocess.PIPE, universal_newlines=True)
+                cuda_dependencies = ['libcublas.so', 'libcublasLt.so', 'libcudnn.so', 'libcudart.so',
+                                     'libcurand.so', 'libcufft.so', 'libnvToolsExt.so']
                 args = ['patchelf', '--debug']
                 for line in result.stdout.split('\n'):
                     for dependency in cuda_dependencies:
                         if dependency in line:
-                            if not dependency in to_preload:
+                            if dependency not in to_preload:
                                 to_preload.append(line)
                             args.extend(['--remove-needed', line])
                 args.append(dest)
@@ -169,7 +174,8 @@ def run(self):
             file = glob(path.join(self.dist_dir, '*linux*.whl'))[0]
             logger.info('repairing %s for manylinux1', file)
             try:
-                subprocess.run(['auditwheel', 'repair', '-w', self.dist_dir, file], check=True, stdout=subprocess.PIPE)
+                subprocess.run(['auditwheel', 'repair', '-w', self.dist_dir, file],
+                               check=True, stdout=subprocess.PIPE)
             finally:
                 logger.info('removing %s', file)
                 remove(file)
@@ -181,41 +187,42 @@ def run(self):
 
 # Additional binaries
 if platform.system() == 'Linux':
-  libs = ['onnxruntime_pybind11_state.so', 'libdnnl.so.2', 'libmklml_intel.so', 'libmklml_gnu.so', 'libiomp5.so', 'mimalloc.so']
-  dl_libs = ['libonnxruntime_providers_shared.so', 'libonnxruntime_providers_cuda.so']
-  # DNNL, TensorRT & OpenVINO EPs are built as shared libs
-  libs.extend(['libonnxruntime_providers_shared.so'])
-  libs.extend(['libonnxruntime_providers_dnnl.so'])
-  libs.extend(['libonnxruntime_providers_tensorrt.so'])
-  libs.extend(['libonnxruntime_providers_openvino.so'])
-  libs.extend(['libonnxruntime_providers_cuda.so'])
-  # Nuphar Libs
-  libs.extend(['libtvm.so.0.5.1'])
-  if nightly_build:
-    libs.extend(['libonnxruntime_pywrapper.so'])
+    libs = ['onnxruntime_pybind11_state.so', 'libdnnl.so.2', 'libmklml_intel.so', 'libmklml_gnu.so', 'libiomp5.so',
+            'mimalloc.so']
+    dl_libs = ['libonnxruntime_providers_shared.so', 'libonnxruntime_providers_cuda.so']
+    # DNNL, TensorRT & OpenVINO EPs are built as shared libs
+    libs.extend(['libonnxruntime_providers_shared.so'])
+    libs.extend(['libonnxruntime_providers_dnnl.so'])
+    libs.extend(['libonnxruntime_providers_tensorrt.so'])
+    libs.extend(['libonnxruntime_providers_openvino.so'])
+    libs.extend(['libonnxruntime_providers_cuda.so'])
+    # Nuphar Libs
+    libs.extend(['libtvm.so.0.5.1'])
+    if nightly_build:
+        libs.extend(['libonnxruntime_pywrapper.so'])
 elif platform.system() == "Darwin":
-  libs = ['onnxruntime_pybind11_state.so', 'libdnnl.2.dylib', 'mimalloc.so'] # TODO add libmklml and libiomp5 later.
-  # DNNL & TensorRT EPs are built as shared libs
-  libs.extend(['libonnxruntime_providers_shared.dylib'])
-  libs.extend(['libonnxruntime_providers_dnnl.dylib'])
-  libs.extend(['libonnxruntime_providers_tensorrt.dylib'])
-  libs.extend(['libonnxruntime_providers_cuda.dylib'])
-  if nightly_build:
-    libs.extend(['libonnxruntime_pywrapper.dylib'])
+    libs = ['onnxruntime_pybind11_state.so', 'libdnnl.2.dylib', 'mimalloc.so']  # TODO add libmklml and libiomp5 later.
+    # DNNL & TensorRT EPs are built as shared libs
+    libs.extend(['libonnxruntime_providers_shared.dylib'])
+    libs.extend(['libonnxruntime_providers_dnnl.dylib'])
+    libs.extend(['libonnxruntime_providers_tensorrt.dylib'])
+    libs.extend(['libonnxruntime_providers_cuda.dylib'])
+    if nightly_build:
+        libs.extend(['libonnxruntime_pywrapper.dylib'])
 else:
-  libs = ['onnxruntime_pybind11_state.pyd', 'dnnl.dll', 'mklml.dll', 'libiomp5md.dll']
-  # DNNL, TensorRT & OpenVINO EPs are built as shared libs
-  libs.extend(['onnxruntime_providers_shared.dll'])
-  libs.extend(['onnxruntime_providers_dnnl.dll'])
-  libs.extend(['onnxruntime_providers_tensorrt.dll'])
-  libs.extend(['onnxruntime_providers_openvino.dll'])
-  libs.extend(['onnxruntime_providers_cuda.dll'])
-  # DirectML Libs
-  libs.extend(['DirectML.dll'])
-  # Nuphar Libs
-  libs.extend(['tvm.dll'])
-  if nightly_build:
-    libs.extend(['onnxruntime_pywrapper.dll'])
+    libs = ['onnxruntime_pybind11_state.pyd', 'dnnl.dll', 'mklml.dll', 'libiomp5md.dll']
+    # DNNL, TensorRT & OpenVINO EPs are built as shared libs
+    libs.extend(['onnxruntime_providers_shared.dll'])
+    libs.extend(['onnxruntime_providers_dnnl.dll'])
+    libs.extend(['onnxruntime_providers_tensorrt.dll'])
+    libs.extend(['onnxruntime_providers_openvino.dll'])
+    libs.extend(['onnxruntime_providers_cuda.dll'])
+    # DirectML Libs
+    libs.extend(['DirectML.dll'])
+    # Nuphar Libs
+    libs.extend(['tvm.dll'])
+    if nightly_build:
+        libs.extend(['onnxruntime_pywrapper.dll'])
 
 if is_manylinux:
     data = ['capi/libonnxruntime_pywrapper.so'] if nightly_build else []
@@ -427,7 +434,7 @@ def reformat_run_count(count_str):
     if build_suffix_is_date_format and build_suffix_run_count:
         build_suffix = build_suffix[:8] + build_suffix_run_count
     elif len(build_suffix) >= 12:
-      raise RuntimeError(f'Incorrect build suffix: "{build_suffix}"')
+        raise RuntimeError(f'Incorrect build suffix: "{build_suffix}"')
 
     if enable_training:
         from packaging import version
@@ -517,7 +524,7 @@ def save_build_and_package_info(package_name, version_number, cuda_version, rocm
     data_files=data_files,
     install_requires=install_requires,
     keywords='onnx machine learning',
-    entry_points= {
+    entry_points={
         'console_scripts': [
             'onnxruntime_test = onnxruntime.tools.onnxruntime_test:main',
         ]
diff --git a/tools/ci_build/github/azure-pipelines/python-checks-ci-pipeline.yml b/tools/ci_build/github/azure-pipelines/python-checks-ci-pipeline.yml
new file mode 100644
index 0000000000000..17c2b8766d891
--- /dev/null
+++ b/tools/ci_build/github/azure-pipelines/python-checks-ci-pipeline.yml
@@ -0,0 +1,19 @@
+jobs:
+- job: 'PythonCodeChecks'
+  pool:
+    vmImage: 'ubuntu-20.04'
+
+  timeoutInMinutes: 10
+
+  steps:
+  - task: UsePythonVersion@0
+    inputs:
+      versionSpec: '3.8'
+      addToPath: true
+      architecture: 'x64'
+
+  - script: python -m pip install -r tools/ci_build/github/python_checks/requirements.txt
+    displayName: "Install requirements"
+
+  - script: python -m flake8 --config .flake8
+    displayName: "Run Flake8"
diff --git a/tools/ci_build/github/python_checks/readme.md b/tools/ci_build/github/python_checks/readme.md
new file mode 100644
index 0000000000000..b31300d6cf07b
--- /dev/null
+++ b/tools/ci_build/github/python_checks/readme.md
@@ -0,0 +1,18 @@
+# Python Code Checks
+
+Python code checks are run by this [CI build](../azure-pipelines/python-checks-ci-pipeline.yml).
+Here are instructions on how to run them manually.
+
+## Prerequisites
+
+Install requirements.
+
+From the repo root, run:
+
+`$ python -m pip install -r tools/ci_build/github/python_checks/requirements.txt`
+
+## Flake8
+
+From the repo root, run:
+
+`$ python -m flake8 --config .flake8`
diff --git a/tools/ci_build/github/python_checks/requirements.txt b/tools/ci_build/github/python_checks/requirements.txt
new file mode 100644
index 0000000000000..b5446261e8e51
--- /dev/null
+++ b/tools/ci_build/github/python_checks/requirements.txt
@@ -0,0 +1 @@
+flake8==3.9
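
---

Note for local use: the readme above runs the two steps by hand. They can also be wrapped in one small driver script. The sketch below is illustrative only and is not part of this patch; the file name run_python_checks.py is hypothetical, and it assumes it is invoked from the repository root.

#!/usr/bin/env python3
# Minimal sketch of running the Python code checks locally, mirroring the two
# CI steps. Assumptions: invoked from the repo root; the script name
# run_python_checks.py is hypothetical and not part of this patch.
import subprocess
import sys

REQUIREMENTS = "tools/ci_build/github/python_checks/requirements.txt"


def main():
    # Step 1: install the pinned checker version, like the pipeline's
    # "Install requirements" step.
    subprocess.run([sys.executable, "-m", "pip", "install", "-r", REQUIREMENTS], check=True)
    # Step 2: run flake8 with the repo's config, like the "Run Flake8" step.
    # flake8 exits non-zero when it reports findings.
    return subprocess.run([sys.executable, "-m", "flake8", "--config", ".flake8"]).returncode


if __name__ == "__main__":
    sys.exit(main())

Running `python run_python_checks.py` then reproduces what the pipeline does, and since it propagates flake8's exit code it could also serve as a local pre-commit gate.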