File: benchmarks.yml

# This CI configuration for relative benchmarks is based on the research done
# for scikit-image's implementation available here:
# https://github.com/scikit-image/scikit-image/blob/9bdd010a8/.github/workflows/benchmarks.yml#L1
# Blog post with the rationale: https://labs.quansight.org/blog/2021/08/github-actions-benchmarks/

name: Benchmarks

on:
  pull_request:
    types: [labeled]
  schedule:
    - cron: "6 6 * * 0" # 06:06 UTC every Sunday
  workflow_dispatch:
    inputs:
      base_ref:
        description: "Baseline commit or git reference"
        required: true
      contender_ref:
        description: "Contender commit or git reference"
        required: true

# This is the main configuration section, fine-tuned to napari's needs.
# All the *_THREADS options are there to make the benchmarks more robust by
# disabling parallelism in the underlying numerical libraries.
env:
  OPENBLAS_NUM_THREADS: "1"
  MKL_NUM_THREADS: "1"
  OMP_NUM_THREADS: "1"
  ASV_OPTIONS: "--split --show-stderr --factor 1.5 --attribute timeout=900"
  # --split -> split the final report into tables
  # --show-stderr -> print tracebacks if errors occur
  # --factor 1.5 -> report a regression if contender timings exceed 1.5x the baseline
  # --attribute timeout=900 -> override the per-benchmark timeout (default=60s) to allow slow benchmarks to run
  # see https://asv.readthedocs.io/en/stable/commands.html#asv-continuous for more details!
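  #
  # For illustration only: with these options, the run step below ends up
  # invoking asv roughly as follows (regex and refs filled in per matrix entry)
  #
  #   asv continuous --split --show-stderr --factor 1.5 --attribute timeout=900 \
  #     -b "^benchmark_qt_.*" <BASE_REF> <CONTENDER_REF>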

jobs:
  benchmark:
    if: ${{ (github.event.label.name == 'run-benchmarks' && github.event_name == 'pull_request') || github.event_name == 'schedule' || github.event_name == 'workflow_dispatch' }}
    name: ${{ matrix.benchmark-name }}
    runs-on: ${{ matrix.runs-on }}
    env:
      PYTHON_VERSION: "3.12"
    permissions:
      contents: read
      issues: write
    strategy:
      fail-fast: false
      matrix:
        include:
          - benchmark-name: Qt
            asv-command: continuous
            selection-regex: "^benchmark_qt_.*"
            runs-on: macos-latest
            # Qt benchmarks run on macOS to avoid the whole Xvfb business:
            # under Xvfb everything runs, but some tests segfault :shrug:
            # Fortunately, the macOS graphics stack needs no virtual display!
          - benchmark-name: non-Qt
            asv-command: continuous
            selection-regex: "^benchmark_(?!qt_).*"
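            # (?!qt_) is a negative lookahead: select every benchmark_* module
            # except the Qt ones, which run in the macOS job above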
            runs-on: ubuntu-latest

    steps:
      # We need the full repo to avoid this issue
      # https://github.com/actions/checkout/issues/23
      - uses: actions/checkout@08c6903cd8c0fde910a37f88322edcfb5dd907a8 # v5.0.0
        with:
          fetch-depth: 0

      - uses: actions/setup-python@e797f83bcb11b83ae66e0230d6156d7c80228e7c # v6.0.0
        name: Install Python
        with:
          python-version: ${{ env.PYTHON_VERSION }}
          cache-dependency-path: pyproject.toml

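      # Qt needs a display to initialize even for headless benchmark runs;
      # this action provides one (e.g. Xvfb on Linux) where the runner has none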
      - name: Setup headless display
        uses: pyvista/setup-headless-display-action@7d84ae825e6d9297a8e99bdbbae20d1b919a0b19 # v4.2
        with:
          qt: true

      - name: Setup asv
        run: python -m pip install "asv[virtualenv]"
        env:
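          # PIP_CONSTRAINT makes pip honor the pinned versions in the constraints file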
          PIP_CONSTRAINT: resources/constraints/benchmark.txt

      - uses: octokit/request-action@05a2312de9f8207044c4c9e41fe19703986acc13 # v2.x
        id: latest_release
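        # Fetch the latest napari release from the GitHub API; scheduled runs
        # use its target commit as the benchmark baseline (parsed with fromJSON below)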
        with:
          route: GET /repos/{owner}/{repo}/releases/latest
          owner: napari
          repo: napari
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}

      - name: Run ${{ matrix.benchmark-name }} benchmarks
        id: run_benchmark
        env:
          # asv will check out commits, which might contain LFS artifacts; ignore
          # LFS errors, since they are probably just documentation PNGs not needed here
          GIT_LFS_SKIP_SMUDGE: 1
          HEAD_LABEL: ${{ github.event.pull_request.head.label }}
          PIP_CONSTRAINT: ${{ github.workspace }}/resources/constraints/benchmark.txt
          TMPDIR: ${{ github.workspace }}/tmp
        run: |
          set -euxo pipefail
          mkdir -p "${TMPDIR}"
          touch "${TMPDIR}/empty"
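
          # Split the space-separated ASV_OPTIONS string into a bash array so
          # each flag is passed to asv as a separate argument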
          read -ra cmd_options <<< "$ASV_OPTIONS"

          # ID this runner
          asv machine --yes

          if [[ $GITHUB_EVENT_NAME == pull_request ]]; then
            EVENT_NAME="PR #${{ github.event.pull_request.number }}"
            BASE_REF="${{ github.event.pull_request.base.sha }}"
            CONTENDER_REF="${GITHUB_SHA}"
            echo "Baseline:  ${BASE_REF} (${{ github.event.pull_request.base.label }})"
            echo "Contender: ${CONTENDER_REF} ($HEAD_LABEL)"
          elif [[ $GITHUB_EVENT_NAME == schedule ]]; then
            EVENT_NAME="cronjob"
            BASE_REF="${{ fromJSON(steps.latest_release.outputs.data).target_commitish }}"
            CONTENDER_REF="${GITHUB_SHA}"
            echo "Baseline:  ${BASE_REF} (${{ fromJSON(steps.latest_release.outputs.data).tag_name }})"
            echo "Contender: ${CONTENDER_REF} (current main)"
          elif [[ $GITHUB_EVENT_NAME == workflow_dispatch ]]; then
            EVENT_NAME="manual trigger"
            BASE_REF="${{ github.event.inputs.base_ref }}"
            CONTENDER_REF="${{ github.event.inputs.contender_ref }}"
            echo "Baseline:  ${BASE_REF} (workflow input)"
            echo "Contender: ${CONTENDER_REF} (workflow input)"
          fi

          echo "EVENT_NAME=$EVENT_NAME" >> "$GITHUB_ENV"
          echo "BASE_REF=$BASE_REF" >> "$GITHUB_ENV"
          echo "CONTENDER_REF=$CONTENDER_REF" >> "$GITHUB_ENV"

          # Run benchmarks for current commit against base
          asv continuous "${cmd_options[@]}" -b "${{ matrix.selection-regex }}" "${BASE_REF}" "${CONTENDER_REF}" \
          | sed -E "/Traceback | failed$|PERFORMANCE DECREASED/ s/^/::error:: /" \
          | tee asv_continuous.log
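          # "::error:: " is a GitHub Actions workflow command: prefixed lines
          # show up as error annotations in the run summary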

          # Fail the job if the run produced tracebacks, failed benchmarks,
          # or significant performance regressions
          if grep "Traceback \|failed\|PERFORMANCE DECREASED" asv_continuous.log > /dev/null ; then
              exit 1
          fi

      - name: Report Failures as Issue
        if: ${{ (github.event_name == 'schedule' || github.event_name == 'workflow_dispatch') && failure() }}
        uses: JasonEtco/create-an-issue@1b14a70e4d8dc185e5cc76d3bec9eab20257b2c5 # v2.9.2
        env:
          GITHUB_TOKEN: ${{ secrets.GITHUB_TOKEN }}
          PLATFORM: ${{ matrix.runs-on }}
          PYTHON: ${{ env.PYTHON_VERSION }}
          BACKEND: ${{ matrix.benchmark-name }}
          RUN_ID: ${{ github.run_id }}
          TITLE: "[test-bot] Benchmark tests failing"
        with:
          filename: .github/TEST_FAIL_TEMPLATE.md
          update_existing: true

      - name: Upload additional information about failure
        # Upload the tmp directory as an artifact: this is where our code
        # stores the dumped structures that crash triangulation
        if: failure()
        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
        with:
          path: ${{ github.workspace }}/tmp
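          # upload-artifact v4 skips hidden files by default; include them in
          # case any of the dumped structures are written as dotfiles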
          include-hidden-files: true
          name: tmp-${{ matrix.benchmark-name }}

      - name: Add more info to artifact
        if: always()
        run: |
          # Copy the full `asv continuous` log
          cp asv_continuous.log .asv/results/asv_continuous_${{ matrix.benchmark-name }}.log
          # Ensure the benchmarks_report workflow can run without error even when this isn't a PR
          touch .asv/results/message_${{ matrix.benchmark-name }}.txt

          # Add the message that might be posted as a comment on the PR
          # We delegate the actual comment to `benchmarks_report.yml` due to
          # potential token permissions issues
          if [[ $GITHUB_EVENT_NAME == pull_request ]]; then

          echo "${{ github.event.pull_request.number }}" > .asv/results/pr_number
          echo \
          "The ${{ matrix.benchmark-name }} benchmark run requested by $EVENT_NAME ($CONTENDER_REF vs $BASE_REF) has" \
          "finished with status '${{ steps.run_benchmark.outcome }}'. See the" \
          "[CI logs and artifacts](||BENCHMARK_CI_LOGS_URL||) for further details." \
          > .asv/results/message_${{ matrix.benchmark-name }}.txt

          awk '/Benchmark.*Parameter/,/SOME BENCHMARKS HAVE CHANGED SIGNIFICANTLY/' asv_continuous.log \
          >> .asv/results/message_${{ matrix.benchmark-name }}.txt
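          # The awk range pattern copies everything from the results-table
          # header line through the "SOME BENCHMARKS HAVE CHANGED SIGNIFICANTLY" marker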

          fi

      - uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
        if: always()
        with:
          name: asv-benchmark-results-${{ github.run_id }}-${{ github.run_number }}-${{ github.run_attempt }}-${{ matrix.benchmark-name }}
          path: .asv/results

  combine-artifacts:
    runs-on: ubuntu-latest
    needs: benchmark
    if: always() && needs.benchmark.result != 'skipped'
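    # Merge the per-matrix result artifacts into a single artifact so that
    # downstream consumers (e.g. benchmarks_report.yml) only download one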
    steps:
      - name: Download artifact
        uses: actions/download-artifact@634f93cb2916e3fdff6788551b99b062d0335ce0 # v5.0.0
        with:
          pattern: asv-benchmark-results*
          path: asv_result
          merge-multiple: true
      - name: Upload artifact
        uses: actions/upload-artifact@ea165f8d65b6e75b540449e92b4886f43607fa02 # v4.6.2
        with:
          name: asv-benchmark-results-${{ github.run_id }}-${{ github.run_number }}-${{ github.run_attempt }}
          path: asv_result