File: ci_run_n_monitor.py

#!/usr/bin/env python3
# Copyright © 2020 - 2022 Collabora Ltd.
# Authors:
#   Tomeu Vizoso <tomeu.vizoso@collabora.com>
#   David Heidelberg <david.heidelberg@collabora.com>
#
# For the dependencies, see the requirements.txt
# SPDX-License-Identifier: MIT

"""
Helper script to restrict a CI pipeline to run only the required jobs
and show the job(s) logs.
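
Example (matches the argparse epilog below):
    ci_run_n_monitor.py --rev $(git rev-parse HEAD) --target ".*traces"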
"""

import argparse
import re
import sys
import time
from collections import defaultdict, Counter
from concurrent.futures import ThreadPoolExecutor
from functools import partial
from itertools import chain
from subprocess import check_output, CalledProcessError
from typing import Callable, Dict, TYPE_CHECKING, Iterable, Literal, Optional, Tuple, cast

import gitlab
import gitlab.v4.objects
from gitlab_common import (
    GITLAB_URL,
    TOKEN_DIR,
    get_gitlab_pipeline_from_url,
    get_gitlab_project,
    get_token_from_default_dir,
    is_gitlab_job,
    pretty_duration,
    read_token,
    wait_for_pipeline,
)
from gitlab_gql import GitlabGQL, create_job_needs_dag, filter_dag, print_dag, print_formatted_list
from rich.console import Console

if TYPE_CHECKING:
    from gitlab_gql import Dag

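# Polling intervals (in seconds) and the retry budget for enabling jobs.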
REFRESH_WAIT_LOG = 10
REFRESH_WAIT_JOBS = 6
MAX_ENABLE_JOB_ATTEMPTS = 3

STATUS_COLORS = defaultdict(lambda: "", {
    "running": "[blue]",
    "success": "[green]",
    "failed": "[red]",
    "canceled": "[magenta]",
    "canceling": "[magenta]",
})
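# e.g. STATUS_COLORS["running"] -> "[blue]"; statuses without an entry map to "" (no color).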

COMPLETED_STATUSES = frozenset({"success", "failed"})
RUNNING_STATUSES = frozenset({"created", "pending", "running"})
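# Statuses outside these two sets (e.g. "manual", "skipped") are handled case by case
# by enable_job() and the monitor loop.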

if is_gitlab_job():
    console = Console(highlight=False, no_color=False, color_system="truecolor", width=120)
else:
    console = Console(highlight=False)
print = console.print


def print_job_status(
    job: gitlab.v4.objects.ProjectPipelineJob,
    new_status: bool = False,
) -> None:
    """It prints a nice, colored job status with a link to the job."""
    if job.status in {"canceled", "canceling"}:
        return

    if new_status and job.status == "created":
        return

    global type_field_pad
    global name_field_pad
    jtype = "🞋 job"
    job_name = job.name
    type_field_pad = max(type_field_pad, len(jtype))
    name_field_pad = max(name_field_pad, len(job_name))

    duration = job_duration(job)

    print(
        f"{STATUS_COLORS[job.status]}"
        f"{jtype:{type_field_pad}} "  # U+1F78B Round target
        f"{link2print(job.web_url, job.name, name_field_pad)} " +
        (f" has new status: {job.status}" if new_status else f" {job.status}") +
        (f" ({pretty_duration(duration)})" if job.started_at else "")
    )


def job_duration(job: gitlab.v4.objects.ProjectPipelineJob) -> float:
    """
    Given a job, report the time lapsed in execution.
    :param job: Pipeline job
    :return: Current time in execution
    """
    if job.duration:
        return job.duration
    elif job.started_at:
        # Convert both times to UTC timestamps for consistent comparison
        current_time = time.time()
        start_time = job.started_at.timestamp()
        return current_time - start_time
    return 0.0


def pretty_wait(sec: int) -> None:
    """shows progressbar in dots"""
    if is_gitlab_job():
        time.sleep(sec)
        return
    for val in range(sec, 0, -1):
        print(f"⏲  {val:2d} seconds", end="\r")  # U+23F2 Timer clock
        time.sleep(1)


def run_target_job(
    job: gitlab.v4.objects.ProjectPipelineJob,
    enable_job_fn: Callable,
    stress: int,
    execution_times: dict,
    target_statuses: dict,
) -> None:
    execution_times[job.name][job.id] = (job_duration(job), job.status, job.web_url)
    if stress and job.status in COMPLETED_STATUSES:
        if (
            stress < 0
            or len(execution_times[job.name]) < stress
        ):
            enable_job_fn(job=job, action_type="retry")
            # Wait for the next loop to get the updated job object
            return
    else:
        enable_job_fn(job=job, action_type="target")

    print_job_status(job, job.status not in target_statuses[job.name])
    target_statuses[job.name] = job.status


def monitor_pipeline(
    project: gitlab.v4.objects.Project,
    pipeline: gitlab.v4.objects.ProjectPipeline,
    job_filter: Callable,
    dependencies: set[str],
    stress: int,
    inhibit_single_target_trace: bool = False,
    polling_period: int = REFRESH_WAIT_JOBS,
) -> tuple[Optional[int], Optional[int], Dict[str, Dict[int, Tuple[float, str, str]]]]:
    """Monitors pipeline and delegate canceling jobs"""
    statuses: dict[str, str] = defaultdict(str)
    target_statuses: dict[str, str] = defaultdict(str)
    execution_times: dict[str, dict[str, tuple[float, str, str]]] = defaultdict(lambda: defaultdict(tuple))
    target_id: int = -1
    global type_field_pad
    type_field_pad = 0
    global name_field_pad
    name_field_pad = len(max(dependencies, key=len)) + 2
    # In a running pipeline, we can skip following job traces that are in these statuses.
    skip_follow_statuses: frozenset[str] = COMPLETED_STATUSES

    # Pre-populate the stress status counter for already completed target jobs.
    if stress:
        # When stress testing, it is necessary to collect this information before starting.
        for job in pipeline.jobs.list(all=True, include_retried=True):
            if job_filter(
                job_name=job.name,
                job_stage=job.stage,
                job_tags=job.tag_list,
            ) and job.status in COMPLETED_STATUSES:
                execution_times[job.name][job.id] = (job_duration(job), job.status, job.web_url)

    # jobs_waiting is a list of job names that are waiting for status update.
    # It occurs when a job that we want to run depends on another job that is not yet finished.
    jobs_waiting = []
    # Dictionary to track the number of attempts made for each job for a given status
    enable_attempts: dict[tuple[int, str], int] = {}
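    # e.g. enable_attempts[(12345, "failed")] == 2 means we already tried twice to
    # re-enable the (hypothetical) job 12345 while it was in the "failed" state.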
    # FIXME: This function has too many parameters, consider refactoring.
    enable_job_fn = partial(
        enable_job,
        project=project,
        enable_attempts=enable_attempts,
        jobs_waiting=jobs_waiting,
    )
    while True:
        deps_failed = []
        to_cancel = []
        jobs_waiting.clear()
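        # Rebuilt on every pass: enable_job() appends jobs still in "created" state.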
        for job in sorted(pipeline.jobs.list(all=True), key=lambda j: j.name):
            job = cast(gitlab.v4.objects.ProjectPipelineJob, job)
            if job_filter(
                job_name=job.name,
                job_stage=job.stage,
                job_tags=job.tag_list,
            ):
                run_target_job(
                    job,
                    enable_job_fn,
                    stress,
                    execution_times,
                    target_statuses
                )
                target_id = job.id
                continue
            # all other non-target jobs
            if job.status != statuses[job.name]:
                print_job_status(job, True)
                statuses[job.name] = job.status

            # run dependencies and cancel the rest
            if job.name in dependencies:
                if not enable_job_fn(job=job, action_type="dep"):
                    # Wait for the next loop to get the updated job object
                    continue
                if job.status == "failed":
                    deps_failed.append(job.name)
            else:
                to_cancel.append(job)

        cancel_jobs(project, to_cancel)

        if stress:
            enough = True
            status_counters = {
                name: Counter(info[1] for info in runs.values())
                for name, runs in execution_times.items()
            }
            for job_name, counter in sorted(status_counters.items()):
                n_succeed = counter.get("success", 0)
                n_failed = counter.get("failed", 0)
                n_total_completed = n_succeed + n_failed
                n_total_seen = len(execution_times[job_name])
                print(
                    f"* {job_name:{name_field_pad}} succ: {n_succeed}; "
                    f"fail: {n_failed}; "
                    f"total: {n_total_seen} of {stress}",
                )
                if stress < 0 or n_total_completed < stress:
                    enough = False

            if not enough:
                pretty_wait(polling_period)
                continue

        if jobs_waiting:
            print(f"[yellow]Waiting for jobs to update status:")
            print_formatted_list(jobs_waiting, indentation=8, color="[yellow]")
            pretty_wait(polling_period)
            continue

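        # With a single target job running (and no stress loop), return its id so the
        # caller can follow the job trace.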
        if (
            not inhibit_single_target_trace
            and stress in [0, 1]
            and len(target_statuses) == 1
            and RUNNING_STATUSES.intersection(target_statuses.values())
        ):
            return target_id, None, execution_times

        if (
            {"failed"}.intersection(target_statuses.values())
            and not RUNNING_STATUSES.intersection(target_statuses.values())
        ):
            return None, 1, execution_times

        if (
            {"skipped"}.intersection(target_statuses.values())
            and not RUNNING_STATUSES.intersection(target_statuses.values())
        ):
            print(
                f"[red]Target in skipped state, aborting. Failed dependencies:{deps_failed}"
            )
            return None, 1, execution_times

        if skip_follow_statuses.issuperset(target_statuses.values()):
            return None, 0, execution_times

        pretty_wait(polling_period)


def enable_job(
    project: gitlab.v4.objects.Project,
    job: gitlab.v4.objects.ProjectPipelineJob,
    enable_attempts: dict[tuple[int, str], int],
    action_type: Literal["target", "dep", "retry"],
    jobs_waiting: list[str],
) -> bool:
    """
    Enable a job to run.
    :param project: The GitLab project.
    :param job: The job to enable.
    :param enable_attempts: A dictionary tracking the number of attempts made for each job in a given status.
    :param action_type: The type of action to perform.
    :param jobs_waiting: Accumulator for names of jobs that are not yet ready to run.
    :return: True if the job was enabled, False otherwise.
    """
    # We want to run this job, but it is not ready to run yet, so let's try again in the next
    # iteration.
    if job.status == "created":
        jobs_waiting.append(job.name)
        return False

    if (
        (job.status in COMPLETED_STATUSES and action_type != "retry")
        or job.status in {"skipped"} | RUNNING_STATUSES
    ):
        return False

    # Get current attempt number
    attempt_count = enable_attempts.get((job.id, job.status), 0)
    # Check if we've exceeded max attempts to avoid infinite loop
    if attempt_count == MAX_ENABLE_JOB_ATTEMPTS:
        print(
            f"[yellow]WARNING: "
            f"Maximum enabling attempts ({MAX_ENABLE_JOB_ATTEMPTS}) reached for job {job.name} in {job.status} status"
            f"({link2print(job.web_url, job.id)})."
        )
        enable_attempts[(job.id, job.status)] = attempt_count + 1
        return False
    elif attempt_count > MAX_ENABLE_JOB_ATTEMPTS:
        return False

    enable_attempts[(job.id, job.status)] = attempt_count + 1

    pjob = project.jobs.get(job.id, lazy=True)
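    # lazy=True skips the GET request; only the job id is needed to call retry()/play().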

    if job.status in {"success", "failed", "canceled", "canceling"}:
        try:
            pjob.retry()
        except Exception as e:
            print(f"Error retrying job {job.name}: {e}")
            return False
    else:
        try:
            pjob.play()
        except Exception as e:
            print(f"Error playing job {job.name}: {e}")
            return False

    if action_type == "target":
        jtype = "🞋 target"  # U+1F78B Round target
    elif action_type == "retry":
        jtype = "↻ retrying"  # U+21BB Clockwise open circle arrow
    else:
        jtype = "↪ dependency"  # U+21AA Left Arrow Curving Right

    global type_field_pad
    global name_field_pad
    job_name = job.name
    type_field_pad = max(type_field_pad, len(jtype))
    name_field_pad = max(name_field_pad, len(job_name))
    print(
        f"[magenta]{jtype:{type_field_pad}} {job.name:{name_field_pad}} manually enabled"
    )

    return True


def cancel_job(
    project: gitlab.v4.objects.Project,
    pipeline_job: gitlab.v4.objects.ProjectPipelineJob
) -> Optional[gitlab.v4.objects.ProjectPipelineJob]:
    """
    Cancel GitLab job
    :param project: project from the pipeline job comes from
    :param pipeline_job: job made from the pipeline list
    :return the job object when cancel was called
    """
    if pipeline_job.status not in RUNNING_STATUSES:
        return
    try:
        project_job = project.jobs.get(pipeline_job.id, lazy=True)
        project_job.cancel()
    except (gitlab.GitlabCancelError, gitlab.GitlabGetError):
        # If the job failed to cancel, it will be retried in the monitor_pipeline() next iteration
        return
    return pipeline_job


def cancel_jobs(
    project: gitlab.v4.objects.Project,
    to_cancel: list[gitlab.v4.objects.ProjectPipelineJob]
) -> None:
    """
    Cancel unwanted GitLab jobs
    :param project: project from where the pipeline comes
    :param to_cancel: list of jobs to be cancelled
    """
    if not to_cancel:
        return

    with ThreadPoolExecutor(max_workers=6) as exe:
        part = partial(cancel_job, project)
        maybe_cancelled_job = exe.map(part, to_cancel)
        cancelled_jobs = [f"🗙 {job.name}" for job in maybe_cancelled_job if job]  # U+1F5D9 Cancellation X

    # Report the jobs that were actually cancelled
    if cancelled_jobs:
        print(f"Cancelled {len(cancelled_jobs)} jobs:")
        print_formatted_list(cancelled_jobs, indentation=8)


def print_log(
    project: gitlab.v4.objects.Project,
    job_id: int
) -> None:
    """Print job log into output"""
    printed_lines = 0
    while True:
        job = project.jobs.get(job_id)

        # GitLab's REST API doesn't offer pagination for logs, so we have to refetch it all
        lines = job.trace().decode().splitlines()
        for line in lines[printed_lines:]:
            print(line, markup=False)
        printed_lines = len(lines)

        if job.status in COMPLETED_STATUSES:
            print(f"[green]Job finished: {job.web_url}")
            return
        pretty_wait(REFRESH_WAIT_LOG)


def parse_args() -> argparse.Namespace:
    """Parse args"""
    parser = argparse.ArgumentParser(
        description="Tool to trigger a subset of container jobs "
        + "and monitor the progress of a test job",
        epilog="Example: mesa-monitor.py --rev $(git rev-parse HEAD) "
        + '--target ".*traces" ',
    )
    parser.add_argument(
        "--server",
        metavar="gitlab-server",
        type=str,
        default=GITLAB_URL,
        help=f"Specify the GitLab server work with (Default: {GITLAB_URL})",
    )
    parser.add_argument(
        "--target",
        metavar="target-job",
        help="Target job regex. For multiple targets, pass multiple values, "
             "eg. `--target foo bar`. Only jobs in the target stage(s) "
             "supplied, and their dependencies, will be considered.",
        required=True,
        nargs=argparse.ONE_OR_MORE,
    )
    parser.add_argument(
        "--include-stage",
        metavar="include-stage",
        help="Job stages to include when searching for target jobs. "
             "For multiple targets, pass multiple values, eg. "
             "`--include-stage foo bar`.",
        default=[".*"],
        nargs=argparse.ONE_OR_MORE,
    )
    parser.add_argument(
        "--exclude-stage",
        metavar="exclude-stage",
        help="Job stages to exclude when searching for target jobs. "
             "For multiple targets, pass multiple values, eg. "
             "`--exclude-stage foo bar`. By default, performance and "
             "nightly jobs are excluded; pass --exclude-stage '' to "
             "include them for consideration.",
        default=["performance", ".*-postmerge", ".*-nightly"],
        nargs=argparse.ONE_OR_MORE,
    )
    parser.add_argument(
        "--job-tags",
        metavar="job-tags",
        help="Job tags to require when searching for target jobs. If multiple "
             "values are passed, eg. `--job-tags 'foo.*' 'bar'`, the job will "
             "need to have a tag matching `foo.*` *and* a tag matching `bar` "
             "to qualify. Passing `--job-tags '.*'` makes sure the job has "
             "a tag defined, while not passing `--job-tags` also allows "
             "untagged jobs.",
        default=[],
        nargs=argparse.ONE_OR_MORE,
    )
    parser.add_argument(
        "--token",
        metavar="token",
        type=str,
        default=get_token_from_default_dir(),
        help="Use the provided GitLab token (with `api` scope) or token file, "
             f"otherwise it's read from {TOKEN_DIR / 'gitlab-token'}",
    )
    parser.add_argument(
        "--force-manual", action="store_true",
        help="Deprecated argument; manual jobs are always force-enabled"
    )
    parser.add_argument(
        "--stress",
        metavar="n",
        type=int,
        default=0,
        help="Stresstest job(s). Specify the number of times to rerun the selected jobs, "
             "or use -1 for indefinite. Defaults to 0. If jobs have already been executed, "
             "this will ensure the total run count respects the specified number.",
    )
    parser.add_argument(
        "--project",
        metavar="name",
        type=str,
        default="mesa",
        help="GitLab project in the format <user>/<project> or just <project>",
    )
    parser.add_argument(
        "--dry-run",
        action="store_true",
        help="Exit after printing target jobs and dependencies",
    )
    parser.add_argument(
        "--no-job-log",
        action="store_true",
        help="When there is only one target job, inhibit the job trace output in the console.",
    )
    parser.add_argument(
        "--polling-period",
        type=int,
        default=REFRESH_WAIT_JOBS,
        help=f"Specify the waiting seconds between monitor loops. (Default: {REFRESH_WAIT_JOBS})",
     )


    mutex_group1 = parser.add_mutually_exclusive_group()
    mutex_group1.add_argument(
        "--rev",
        metavar="id",
        type=str,
        default="HEAD",
        help="Repository git commit-ish, tag or branch name (default: HEAD)",
    )
    mutex_group1.add_argument(
        "--pipeline-url",
        metavar="url",
        type=str,
        help="URL of the pipeline to use, instead of auto-detecting it.",
    )
    mutex_group1.add_argument(
        "--mr",
        metavar="id",
        type=int,
        help="ID of a merge request; the latest pipeline in that MR will be used.",
    )

    args = parser.parse_args()

    # argparse doesn't support groups inside add_mutually_exclusive_group(),
    # which means we can't just put `--project` and `--rev` in a group together,
    # we have to do this by hand instead.
    if args.pipeline_url and args.project != parser.get_default("project"):
        # weird phrasing but it's the error add_mutually_exclusive_group() gives
        parser.error("argument --project: not allowed with argument --pipeline-url")

    return args


def print_detected_jobs(
    target_dep_dag: "Dag",
    dependency_jobs: Iterable[str],
    target_jobs: Iterable[str],
) -> None:
    def print_job_set(color: str, kind: str, job_set: Iterable[str]):
        job_list = list(job_set)
        print(f"{color}Running {len(job_list)} {kind} jobs:")
        print_formatted_list(job_list, indentation=8, color=color)

    print("[yellow]Detected target job and its dependencies:")
    print_dag(target_dep_dag, indentation=8, color="[yellow]")
    print_job_set("[magenta]", "dependency", dependency_jobs)
    print_job_set("[blue]", "target", target_jobs)


def find_dependencies(
    server: str,
    token: str | None,
    job_filter: Callable,
    project_path: gitlab.v4.objects.Project,
    iid: int
) -> set[str]:
    """
    Find the dependencies of the target jobs in a GitLab pipeline.

    This function uses the GitLab GraphQL API to fetch the job dependency graph
    of a pipeline, filters the graph to only include the target jobs and their
    dependencies, and returns the names of these jobs.

    Args:
        server (str): The URL of the GitLab server.
        token (str | None): The GitLab API token. If None, the API is accessed without
                            authentication.
        job_filter (Callable): Callable that decides whether a job counts as a target.
        project_path (Project): The GitLab project; its path_with_namespace is used.
        iid (int): The internal ID of the pipeline.

    Returns:
        set[str]: A set of the names of the target jobs and their dependencies.

    Raises:
        SystemExit: If no target jobs are found in the pipeline.
    """
    gql_instance = GitlabGQL(
        url=f"{server}/api/graphql",
        token=token
    )
    dag = create_job_needs_dag(
        gql_instance, {"projectPath": project_path.path_with_namespace, "iid": iid}
    )

    target_dep_dag = filter_dag(dag, job_filter)
    if not target_dep_dag:
        print("[red]The job(s) were not found in the pipeline.")
        sys.exit(1)

    dependency_jobs = set(chain.from_iterable(d["needs"] for d in target_dep_dag.values()))
    target_jobs = set(target_dep_dag.keys())
    print_detected_jobs(target_dep_dag, dependency_jobs, target_jobs)
    return target_jobs.union(dependency_jobs)


def print_monitor_summary(
    execution_collection: Dict[str, Dict[int, Tuple[float, str, str]]],
    t_start: float,
) -> None:
    """Summary of the test execution"""
    t_end = time.perf_counter()
    spent_minutes = (t_end - t_start) / 60
    print(f"⏲ Duration of script execution: {spent_minutes:0.1f} minutes")  # U+23F2 Timer clock
    if len(execution_collection) == 0:
        return
    print(f"⏲ Jobs execution times:")  # U+23F2 Timer clock
    job_names = list(execution_collection.keys())
    job_names.sort()
    name_field_pad = len(max(job_names, key=len)) + 2
    for name in job_names:
        job_executions = execution_collection[name]
        job_times = ', '.join([__job_duration_record(job_execution)
                               for job_execution in sorted(job_executions.items())])
        print(f"* {name:{name_field_pad}}: ({len(job_executions)}) {job_times}")


def __job_duration_record(dict_item: tuple[int, tuple[float, str, str]]) -> str:
    """
    Format one (job id, (duration, status, url)) pair of a job execution record.
    :param dict_item: item of execution_collection[name]: Dict[int, Tuple[float, str, str]]
    """
    job_id = f"{dict_item[0]}"  # dictionary key
    job_duration, job_status, job_url = dict_item[1]  # dictionary value, the tuple
    return (
        f"{STATUS_COLORS[job_status]}"
        f"{link2print(job_url, job_id)}: {pretty_duration(job_duration):>8}"
    )


def link2print(url: str, text: str, text_pad: int = 0) -> str:
    text = str(text)
    text_pad = len(text) if text_pad < 1 else text_pad
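    # e.g. (hypothetical values) link2print("https://example.com/job/1", "123", 6)
    # -> "[link=https://example.com/job/1]123   [/link]" on a terminal; rich renders
    # the [link] markup as a clickable hyperlink.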
    if console.is_terminal:
        return f"[link={url}]{text:{text_pad}}[/link]"
    else:
        return f"{text:{text_pad}}"


def main() -> None:
    try:
        t_start = time.perf_counter()

        args = parse_args()

        token = read_token(args.token)

        gl = gitlab.Gitlab(url=args.server,
                           private_token=token,
                           retry_transient_errors=True)

        REV: str = args.rev

        if args.pipeline_url:
            pipe, cur_project = get_gitlab_pipeline_from_url(gl, args.pipeline_url)
            REV = pipe.sha
        else:
            mesa_project = gl.projects.get("mesa/mesa")
            projects = [mesa_project]
            if args.mr:
                REV = mesa_project.mergerequests.get(args.mr).sha
            else:
                REV = check_output(['git', 'rev-parse', REV]).decode('ascii').strip()

                if args.rev == 'HEAD':
                    try:
                        branch_name = check_output([
                            'git', 'symbolic-ref', '-q', 'HEAD',
                        ]).decode('ascii').strip()
                    except CalledProcessError:
                        branch_name = ""

                    # Ignore detached heads
                    if branch_name:
                        tracked_remote = check_output([
                            'git', 'for-each-ref', '--format=%(upstream)',
                            branch_name,
                        ]).decode('ascii').strip()

                        # Ignore local branches that do not track any remote
                        if tracked_remote:
                            remote_rev = check_output([
                                'git', 'rev-parse', tracked_remote,
                            ]).decode('ascii').strip()

                            if REV != remote_rev:
                                print(
                                    f"Local HEAD commit {REV[:10]} differs from the "
                                    f"tracked remote HEAD commit {remote_rev[:10]}"
                                )
                                print("Did you forget to `git push`?")

                projects.append(get_gitlab_project(gl, args.project))
            (pipe, cur_project) = wait_for_pipeline(projects, REV)

        print(f"Revision: {REV}")
        print(f"Pipeline: {pipe.web_url}")

        target = '|'.join(args.target)
        target = target.strip()

        print(f"🞋 target job: [blue]{target}")  # U+1F78B Round target

        # Implicitly include `parallel:` jobs
        target = f'({target})' + r'( \d+/\d+)?'
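        # e.g. a (hypothetical) target "zink" will also match its parallel shards like "zink 1/4".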

        target_jobs_regex = re.compile(target)

        include_stage = '|'.join(args.include_stage)
        include_stage = include_stage.strip()

        print(f"🞋 target from stages: [blue]{include_stage}")  # U+1F78B Round target

        include_stage_regex = re.compile(include_stage)

        exclude_stage = '|'.join(args.exclude_stage)
        exclude_stage = exclude_stage.strip()

        print(f"🞋 target excluding stages: [blue]{exclude_stage}")  # U+1F78B Round target

        exclude_stage_regex = re.compile(exclude_stage)

        print(f"🞋 target jobs with tags: [blue]{str(args.job_tags)}")  # U+1F78B Round target
        job_tags_regexes = [re.compile(job_tag) for job_tag in args.job_tags]

        def job_filter(
            job_name: str,
            job_stage: str,
            job_tags: set[str],
        ) -> bool:
            """
            Apply user-specified filters to a job, and return whether the
            filters allow that job (True) or not (False).
            """
            if not target_jobs_regex.fullmatch(job_name):
                return False
            if not include_stage_regex.fullmatch(job_stage):
                return False
            if exclude_stage_regex.fullmatch(job_stage):
                return False
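            # e.g. with --job-tags 'foo.*' 'bar', a job tagged {"foo-1", "bar"} passes,
            # while one tagged only {"foo-1"} does not: every tag regex must match some tag.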
            if not all(
                any(job_tags_regex.fullmatch(tag) for tag in job_tags)
                for job_tags_regex in job_tags_regexes
            ):
                return False
            return True

        deps = find_dependencies(
            server=args.server,
            token=token,
            job_filter=job_filter,
            iid=pipe.iid,
            project_path=cur_project
        )

        if args.dry_run:
            sys.exit(0)

        target_job_id, ret, exec_t = monitor_pipeline(
            cur_project,
            pipe,
            job_filter,
            deps,
            args.stress,
            args.no_job_log,
            args.polling_period,
        )

        if target_job_id:
            print_log(cur_project, target_job_id)

        print_monitor_summary(exec_t, t_start)

        sys.exit(ret)
    except KeyboardInterrupt:
        sys.exit(1)


if __name__ == "__main__":
    main()