File: benchmark-sqlglot.yml

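# Runs the sqlglot parse and optimize benchmarks on both the PR branch and
# main, then posts a pyperf comparison table as a PR comment. Triggered by
# a '/benchmark' marker in the PR description or in a PR comment.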
name: Benchmark pull requests

on:
  issue_comment:
    types: [created, edited, deleted]
  pull_request:
    types: [opened, synchronize, reopened]

jobs:
  run-benchmark:
    name: Run benchmark
    runs-on: ubuntu-latest
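    # Gate on the '/benchmark' marker: either in a comment on an existing PR,
    # or in the PR body when the PR is opened, synchronized, or reopened.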
    if: |
      (github.event_name == 'issue_comment' &&
       contains(github.event.comment.body, '/benchmark') &&
       github.event.issue.pull_request) ||
      (github.event_name == 'pull_request' &&
       contains(github.event.pull_request.body, '/benchmark'))
    steps:
      - name: Checkout PR branch
        uses: actions/checkout@v5
        with:
          fetch-depth: 0 # Needed to fetch main branch too
      - name: Set up Python
        uses: actions/setup-python@v5
        with:
          python-version: "3.13"
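      # A throwaway venv keeps the benchmark tooling off the runner's system
      # Python; pyperf supplies the compare_to tool used to diff results later.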
      - name: Create a virtual environment
        run: |
          python -m venv .venv
          source ./.venv/bin/activate
          python -m pip install --upgrade pip
          pip install pyperf
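      # Install the dev dependencies and the Rust tokenizer in release mode,
      # then write pyperf JSON results for the PR branch.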
      - name: Run benchmark on PR branch
        run: |
          source ./.venv/bin/activate
          make install-dev
          make install-dev-rs-release
          python benchmarks/parse.py --quiet --output bench_parse_pr.json
          python benchmarks/optimize.py --quiet --fast --output bench_optimize_pr.json
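      # git worktree gives a clean checkout of main in main-branch/ without
      # disturbing the PR checkout in the repository root.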
      - name: Checkout main branch into subdir
        run: |
          git fetch origin main
          git worktree add main-branch origin/main
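      # Rebuild the venv from scratch so the main-branch run does not inherit
      # packages installed from the PR branch.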
      - name: Reset virtual environment
        run: |
          rm -rf .venv
          python -m venv .venv
          source ./.venv/bin/activate
          python -m pip install --upgrade pip
          pip install pyperf
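      # Repeat the same benchmarks against main, writing the JSON next to the
      # PR results in the repository root.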
      - name: Run benchmark on main branch
        run: |
          source ./.venv/bin/activate
          cd main-branch
          make install-dev
          make install-dev-rs-release
          python benchmarks/parse.py --quiet --output ../bench_parse_main.json
          python benchmarks/optimize.py --quiet --fast --output ../bench_optimize_main.json
          cd ..
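      # pyperf compare_to treats its first argument as the baseline, so the
      # tables read as "PR branch relative to main".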
      - name: Compare benchmarks and save results
        run: |
          source ./.venv/bin/activate
          python -m pyperf compare_to bench_parse_main.json bench_parse_pr.json --table --table-format=md > bench_parse_comparison_raw.txt
          python -m pyperf compare_to bench_optimize_main.json bench_optimize_pr.json --table --table-format=md > bench_optimize_comparison_raw.txt

          # Format with colors
          python .github/scripts/format_benchmark.py bench_parse_comparison_raw.txt > bench_parse_comparison.txt
          python .github/scripts/format_benchmark.py bench_optimize_comparison_raw.txt > bench_optimize_comparison.txt
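      # Assemble a single markdown comment: a legend for the emoji markers,
      # then the formatted parse and optimize tables.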
      - name: Combine benchmark outputs
        run: |
          echo "## Benchmark Results" > combined_benchmarks.md
          echo "" >> combined_benchmarks.md
          echo "**Legend:**" >> combined_benchmarks.md
          echo "- 🟢🟢 = 2x+ faster" >> combined_benchmarks.md
          echo "- 🟢 = 1.1x - 2x faster" >> combined_benchmarks.md
          echo "- ⚪ = No significant change (< 1.1x)" >> combined_benchmarks.md
          echo "- 🔴 = 1.1x - 2x slower" >> combined_benchmarks.md
          echo "- 🔴🔴 = 2x+ slower" >> combined_benchmarks.md
          echo "" >> combined_benchmarks.md
          echo "### Parsing Benchmark" >> combined_benchmarks.md
          cat bench_parse_comparison.txt >> combined_benchmarks.md
          echo -e "\n---\n" >> combined_benchmarks.md
          echo "### Optimization Benchmark" >> combined_benchmarks.md
          cat bench_optimize_comparison.txt >> combined_benchmarks.md
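      # issue-number resolves from whichever event fired: issue_comment
      # payloads carry issue.number, pull_request payloads carry
      # pull_request.number.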
      - name: Comment on PR with combined benchmark results
        uses: peter-evans/create-or-update-comment@v4
        with:
          token: ${{ secrets.GITHUB_TOKEN }}
          issue-number: ${{ github.event.issue.number || github.event.pull_request.number }}
          body-file: combined_benchmarks.md