name: Benchmark compare last release
on:
  push:
    branches:
      - main
  workflow_dispatch:
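# Single Linux job: build the test environment, then benchmark the pushed
# commit against the most recent tag (the last release) with airspeed velocity (asv).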
jobs:
  benchmark:
    name: Linux
    runs-on: ubuntu-latest
    env:
      ASV_DIR: "./asv_bench"
      CONDA_ENV_FILE: ci/requirements/environment.yml
    steps:
      # We need the full repo to avoid this issue
      # https://github.com/actions/checkout/issues/23
      - uses: actions/checkout@v6
        with:
          fetch-depth: 0
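      # Build the conda environment with micromamba and cache it. Note that
      # PYTHON_VERSION and TODAY are not defined in this workflow's env, so
      # they expand to empty strings in the cache key below.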
      - name: Set up conda environment
        uses: mamba-org/setup-micromamba@v2
        with:
          micromamba-version: "1.5.10-0"
          environment-file: ${{env.CONDA_ENV_FILE}}
          environment-name: xarray-tests
          cache-environment: true
          cache-environment-key: "${{runner.os}}-${{runner.arch}}-py${{env.PYTHON_VERSION}}-${{env.TODAY}}-${{hashFiles(env.CONDA_ENV_FILE)}}-benchmark"
          create-args: >-
            asv
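      # Resolve the most recent tag on the repository; it is used as the
      # baseline revision for the benchmark comparison below.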
- name: "Get Previous tag"
id: previoustag
uses: "WyriHaximus/github-action-get-previous-tag@v1"
# with:
# fallback: 1.0.0 # Optional fallback tag to use when no tag can be found
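      # Pin BLAS/OpenMP to a single thread so timings are comparable between
      # runs. ASV_FACTOR is the regression threshold passed to
      # `asv continuous --factor`; ASV_SKIP_SLOW appears to be read by the
      # benchmark suite to skip benchmarks marked as slow.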
      - name: Run benchmarks
        shell: bash -l {0}
        id: benchmark
        env:
          OPENBLAS_NUM_THREADS: 1
          MKL_NUM_THREADS: 1
          OMP_NUM_THREADS: 1
          ASV_FACTOR: 1.5
          ASV_SKIP_SLOW: 1
        run: |
          set -x
          # ID this runner
          asv machine --yes
          echo "Baseline:  ${{ steps.previoustag.outputs.tag }}"
          echo "Contender: ${{ github.sha }}"
          # Select the conda executable asv uses for environment creation
          # (mamba is currently commented out in favour of conda)
          # export CONDA_EXE=$(which mamba)
          export CONDA_EXE=$(which conda)
          # Run benchmarks for the current commit against the baseline tag;
          # sed prefixes failures and regressions with ::error:: so they show
          # up as GitHub error annotations in the log.
          ASV_OPTIONS="--split --show-stderr --factor $ASV_FACTOR"
          asv continuous $ASV_OPTIONS ${{ steps.previoustag.outputs.tag }} ${{ github.sha }} \
              | sed "/Traceback \|failed$\|PERFORMANCE DECREASED/ s/^/::error::/" \
              | tee benchmarks.log
          # Fail the job if any benchmark errored or performance decreased
          if grep "Traceback \|failed\|PERFORMANCE DECREASED" benchmarks.log > /dev/null ; then
              exit 1
          fi
        working-directory: ${{ env.ASV_DIR }}
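      # To reproduce the comparison locally (a sketch: assumes a full clone
      # with tags and the asv_bench directory of this repo):
      #   cd asv_bench
      #   asv machine --yes
      #   asv continuous --split --show-stderr --factor 1.5 <last-release-tag> HEAD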
      - name: Add instructions to artifact
        if: always()
        run: |
          cp benchmarks/README_CI.md benchmarks.log .asv/results/
        working-directory: ${{ env.ASV_DIR }}
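      # Upload the results even when the comparison fails so regressions can
      # be inspected locally, e.g. with `asv publish` and `asv preview`.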
      - uses: actions/upload-artifact@v5
        if: always()
        with:
          name: asv-benchmark-results-${{ runner.os }}
          path: ${{ env.ASV_DIR }}/.asv/results