# This Source Code Form is subject to the terms of the Mozilla Public
# License, v. 2.0. If a copy of the MPL was not distributed with this
# file, You can obtain one at http://mozilla.org/MPL/2.0/.

import logging

from taskgraph.util import json
from taskgraph.util.parameterization import resolve_task_references
from taskgraph.util.taskcluster import get_task_definition

from .registry import register_callback_action
from .util import create_task_from_def, fetch_graph_and_labels

logger = logging.getLogger(__name__)

# Properties available for custom retrigger of any supported test suites
basic_properties = {
"path": {
"type": "string",
"maxLength": 255,
"default": "",
"title": "Path name",
"description": "Path of test(s) to retrigger",
},
"logLevel": {
"type": "string",
"enum": ["debug", "info", "warning", "error", "critical"],
"default": "info",
"title": "Log level",
"description": "Log level for output (INFO is normal, DEBUG gives more detail)",
},
"environment": {
"type": "object",
"default": {"MOZ_LOG": ""},
"title": "Extra environment variables",
"description": "Extra environment variables to use for this run",
"additionalProperties": {"type": "string"},
},
}

# Additional properties available for custom retrigger of certain test suites
extended_properties = basic_properties.copy()
extended_properties.update(
{
"runUntilFail": {
"type": "boolean",
"default": False,
"title": "Run until failure",
"description": (
"Runs the specified set of tests repeatedly "
"until failure (up to REPEAT times)"
),
},
"repeat": {
"type": "integer",
"default": 0,
"minimum": 0,
"title": "Repeat test(s) N times",
"description": (
"Run test(s) repeatedly (usually used in "
"conjunction with runUntilFail)"
),
},
"preferences": {
"type": "object",
"default": {"remote.log.level": "Info"},
"title": "Extra gecko (about:config) preferences",
"description": "Extra gecko (about:config) preferences to use for this run",
"additionalProperties": {"type": "string"},
},
}
)


@register_callback_action(
name="retrigger-custom",
title="Retrigger task with custom parameters",
symbol="rt",
description="Retriggers the specified task with custom environment and parameters",
context=[
{"test-type": "mochitest", "worker-implementation": "docker-worker"},
{"test-type": "reftest", "worker-implementation": "docker-worker"},
{"test-type": "geckoview-junit", "worker-implementation": "docker-worker"},
],
order=10,
schema={
"type": "object",
"properties": extended_properties,
"additionalProperties": False,
"required": ["path"],
},
)
def extended_custom_retrigger_action(
parameters, graph_config, input, task_group_id, task_id
):
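    """Retrigger the selected test task with the extended set of custom parameters."""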
handle_custom_retrigger(parameters, graph_config, input, task_group_id, task_id)


@register_callback_action(
name="retrigger-custom (gtest)",
title="Retrigger gtest task with custom parameters",
symbol="rt",
description="Retriggers the specified task with custom environment and parameters",
context=[{"test-type": "gtest", "worker-implementation": "docker-worker"}],
order=10,
schema={
"type": "object",
"properties": basic_properties,
"additionalProperties": False,
"required": ["path"],
},
)
def basic_custom_retrigger_action_basic(
parameters, graph_config, input, task_group_id, task_id
):
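    """Retrigger the selected gtest task with the basic set of custom parameters."""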
handle_custom_retrigger(parameters, graph_config, input, task_group_id, task_id)


def handle_custom_retrigger(parameters, graph_config, input, task_group_id, task_id):
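    """Create a new task based on ``task_id`` that runs a custom mach command.

    The new task is built from the original task's definition in the full
    task graph, with the normal mozharness test run disabled and a
    CUSTOM_MACH_COMMAND environment variable assembled from the action's
    input (path, log level, repeat/run-until-failure options, preferences
    and extra environment variables).
    """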
task = get_task_definition(task_id)
decision_task_id, full_task_graph, label_to_taskid, _ = fetch_graph_and_labels(
parameters, graph_config
)
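    # look up the original (pre-optimization) task in the full task graph;
    # its metadata name is its label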
pre_task = full_task_graph.tasks[task["metadata"]["name"]]
# fix up the task's dependencies, similar to how optimization would
# have done in the decision
dependencies = {
name: label_to_taskid[label] for name, label in pre_task.dependencies.items()
}
new_task_definition = resolve_task_references(
pre_task.label, pre_task.task, task_id, decision_task_id, dependencies
)
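    # record the resolved dependency task ids on the new task definition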
new_task_definition.setdefault("dependencies", []).extend(dependencies.values())
# don't want to run mozharness tests, want a custom mach command instead
new_task_definition["payload"]["command"] += ["--no-run-tests"]
custom_mach_command = [task["tags"]["test-type"]]
# mochitests may specify a flavor
if new_task_definition["payload"]["env"].get("MOCHITEST_FLAVOR"):
custom_mach_command += [
"--keep-open=false",
"-f",
new_task_definition["payload"]["env"]["MOCHITEST_FLAVOR"],
]
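    # respect the original task's e10s setting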
enable_e10s = json.loads(
new_task_definition["payload"]["env"].get("ENABLE_E10S", "true")
)
if not enable_e10s:
custom_mach_command += ["--disable-e10s"]
custom_mach_command += [
"--log-tbpl=-",
"--log-tbpl-level={}".format(input.get("logLevel", "debug")),
]
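    # optional run-until-failure / repeat behaviour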
if input.get("runUntilFail"):
custom_mach_command += ["--run-until-failure"]
if input.get("repeat"):
custom_mach_command += ["--repeat", str(input.get("repeat", 30))]
# add any custom gecko preferences
for key, val in input.get("preferences", {}).items():
custom_mach_command += ["--setpref", f"{key}={val}"]
custom_mach_command += [input["path"]]
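    # hand the assembled command to the task via the CUSTOM_MACH_COMMAND environment variable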
new_task_definition["payload"]["env"]["CUSTOM_MACH_COMMAND"] = " ".join(
custom_mach_command
)
# update environment
new_task_definition["payload"]["env"].update(input.get("environment", {}))
# tweak the treeherder symbol
new_task_definition["extra"]["treeherder"]["symbol"] += "-custom"
    logger.info("New task definition: %s", new_task_definition)
create_task_from_def(
new_task_definition, parameters["level"], action_tag="retrigger-custom-task"
)