File: fuzz.yml

name: Fuzzing

env:
  # Tell pytest and other tools to produce coloured terminal output.
  # Make sure this is also in the "passenv" section of the tox config.
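  # For reference, the matching tox.ini entry looks roughly like this
  # (a sketch; the real config may pass through additional variables):
  #   [testenv]
  #   passenv =
  #       PY_COLORS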
  PY_COLORS: 1

on:
  # Run every six hours, for six hours each time
  schedule:
    - cron:  '0 */6 * * *'
  # Allow manual launching too so we can test any branch we like
  workflow_dispatch:
  # # Enable this and reduce the timeout below to check a PR is working
  # pull_request:
  #   branches: [ master ]

jobs:
  fuzz:
    if: github.repository == 'HypothesisWorks/hypothesis' || github.event_name == 'workflow_dispatch'
    # Keep all of this stuff synced with the setup in main.yml for CI
    runs-on: ubuntu-latest
    steps:
    - uses: actions/checkout@v3
      with:
        fetch-depth: 0
    - name: Set up Python 3.10
      uses: actions/setup-python@v4
      with:
        python-version: "3.10.9"
    - name: Restore cache
      uses: actions/cache@v3
      with:
        path: |
          ~/.cache
          ~/wheelhouse
          ~/.local
          vendor/bundle
          .tox/
        key: deps-${{ runner.os }}-${{ hashFiles('requirements/*.txt') }}-${{ matrix.task }}
        restore-keys: |
          deps-${{ runner.os }}-${{ hashFiles('requirements/*.txt') }}
          deps-${{ runner.os }}

    # OK, on to the fuzzing-specific part.
    # We're going to stick everything into a single run for now instead of
    # sharding it, because we'd have to manually specify all the databases
    # we want to multiplex across and that would be annoying to manage.
    # TODO: revisit this later; a redis-like service would be so much nicer.
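    # If we do, a shared database could be wired up roughly like this in a
    # Hypothesis settings profile (a sketch only; the URL and profile name
    # are illustrative):
    #   from redis import Redis
    #   from hypothesis import settings
    #   from hypothesis.extra.redis import RedisExampleDatabase
    #   settings.register_profile(
    #       "fuzz",
    #       database=RedisExampleDatabase(Redis.from_url("redis://localhost:6379")),
    #   )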
    - name: Download example database
      uses: dawidd6/action-download-artifact@v9
      with:
        name: hypothesis-example-db
        path: .hypothesis/examples
        if_no_artifact_found: warn
        workflow_conclusion: completed

    - name: Install dependencies
      run: |
        pip install --upgrade setuptools pip wheel
        pip install -r requirements/fuzzing.txt
        pip install hypothesis-python/[all]

    - name: Run hypofuzz session
      continue-on-error: true
      # The timeout ensures that we finish all steps within the six-hour
      # maximum runtime for GitHub Actions.
      # We then run the fuzzer on everything, as in our Windows CI, but avoid
      # the --no-dashboard option because that would also disable .patch writing.
      run: |
        timeout --preserve-status 5.5h \
          hypothesis fuzz -- hypothesis-python/tests/ \
            --ignore=hypothesis-python/tests/quality/ \
            --ignore=hypothesis-python/tests/ghostwriter/
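    # To sanity-check this command locally, a much shorter session over a
    # subset of the tests is enough, e.g. (timeout and path are illustrative):
    #   timeout --preserve-status 5m hypothesis fuzz -- hypothesis-python/tests/cover/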

    - name: Upload patch files with covering and failing `@example()`s
      uses: actions/upload-artifact@v4
      if: always()
      with:
        name: explicit-example-patches
        path: .hypothesis/patches/latest_hypofuzz_*.patch
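    # To use one of these patches locally, download the artifact and apply it
    # from the repo root, e.g. `git apply <downloaded-patch-file>`
    # (the exact filename varies from run to run).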
    
    # Upload the database so it'll be persisted between runs.
    # Note that we can also pull it down to use locally via
    # https://hypothesis.readthedocs.io/en/latest/database.html#hypothesis.database.GitHubArtifactDatabase
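    # A minimal local-usage sketch (profile name is illustrative):
    #   from hypothesis import settings
    #   from hypothesis.database import GitHubArtifactDatabase, ReadOnlyDatabase
    #   settings.register_profile(
    #       "ci-db",
    #       database=ReadOnlyDatabase(GitHubArtifactDatabase("HypothesisWorks", "hypothesis")),
    #   )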
    - name: Upload example database
      uses: actions/upload-artifact@v4
      if: always()
      with:
        name: hypothesis-example-db
        path: .hypothesis/examples