diff --git a/.github/workflows/pull.yml b/.github/workflows/pull.yml
index cc4b04f197a..91699d639f3 100644
--- a/.github/workflows/pull.yml
+++ b/.github/workflows/pull.yml
@@ -38,8 +38,8 @@ jobs:
       # Build and test ExecuTorch with the add model on portable backend.
       PYTHON_EXECUTABLE=python bash .ci/scripts/test_model.sh "add" "${BUILD_TOOL}" "portable"
 
-  test-models-linux:
-    name: test-models-linux
+  test-models-linux-basic:
+    name: test-models-linux-basic
     uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
     permissions:
       id-token: write
@@ -70,6 +70,59 @@ jobs:
       # Build and test ExecuTorch
       PYTHON_EXECUTABLE=python bash .ci/scripts/test_model.sh "${MODEL_NAME}" "${BUILD_TOOL}" "${BACKEND}"
 
+  test-models-linux:
+    name: test-models-linux
+    uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main
+    permissions:
+      id-token: write
+      contents: read
+    strategy:
+      matrix:
+        model: [linear, add, add_mul, ic3, mv2, resnet18, resnet50, mobilebert, emformer_transcribe]
+        backend: [portable, xnnpack-quantization-delegation]
+        runner: [linux.2xlarge]
+        include:
+          - model: ic4
+            backend: portable
+            runner: linux.4xlarge.memory
+          - model: ic4
+            backend: xnnpack-quantization-delegation
+            runner: linux.4xlarge.memory
+          - model: emformer_join
+            backend: portable
+            runner: linux.4xlarge.memory
+          - model: emformer_join
+            backend: xnnpack-quantization-delegation
+            runner: linux.4xlarge.memory
+          - model: phi-4-mini
+            backend: portable
+            runner: linux.4xlarge.memory
+          - model: llama3_2_vision_encoder
+            backend: portable
+            runner: linux.4xlarge.memory
+          - model: w2l
+            backend: portable
+            runner: linux.4xlarge.memory
+      fail-fast: false
+    with:
+      runner: ${{ matrix.runner }}
+      docker-image: executorch-ubuntu-22.04-clang12
+      submodules: 'true'
+      ref: ${{ github.event_name == 'pull_request' && github.event.pull_request.head.sha || github.sha }}
+      timeout: 90
+      script: |
+        # The generic Linux job chooses to use base env, not the one setup by the image
+        CONDA_ENV=$(conda env list --json | jq -r ".envs | .[-1]")
+        conda activate "${CONDA_ENV}"
+
+        MODEL_NAME=${{ matrix.model }}
+        BUILD_TOOL=cmake
+        BACKEND=${{ matrix.backend }}
+
+        PYTHON_EXECUTABLE=python bash .ci/scripts/setup-linux.sh --build-tool "${BUILD_TOOL}"
+        # Build and test ExecuTorch
+        PYTHON_EXECUTABLE=python bash .ci/scripts/test_model.sh "${MODEL_NAME}" "${BUILD_TOOL}" "${BACKEND}"
+
   test-llama-runner-linux:
     name: test-llama-runner-linux
     uses: pytorch/test-infra/.github/workflows/linux_job_v2.yml@main