from __future__ import absolute_import
import json
import os
import yaml
import pytest
from ansible_runner.interface import init_runner
HERE = os.path.abspath(os.path.dirname(__file__))
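# directory containing this test module; the envvars parametrization below
# points ANSIBLE_CALLBACK_PLUGINS at the local ``callback`` subdirectory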
@pytest.fixture()
def executor(tmp_path, request):
private_data_dir = tmp_path / 'foo'
private_data_dir.mkdir()
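    # the fixture reads the test's own parametrize() values off the collected
    # test node, so the tests below only declare ``playbook`` and ``envvars``
    # parameters by name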
playbooks = request.node.callspec.params.get('playbook')
playbook = list(playbooks.values())[0]
envvars = request.node.callspec.params.get('envvars')
if envvars is None:
envvars = {}
# warning messages create verbose events and interfere with assertions
envvars["ANSIBLE_DEPRECATION_WARNINGS"] = "False"
    # the discovered python interpreter is of no interest here; we only want
    # to silence the interpreter-discovery warnings
envvars['ANSIBLE_PYTHON_INTERPRETER'] = 'auto_silent'
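    # a one-host inline inventory: run against localhost over the local
    # connection, reusing the python that runs ansible-playbook itself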
inventory = 'localhost ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"'
r = init_runner(
private_data_dir=private_data_dir,
inventory=inventory,
envvars=envvars,
playbook=yaml.safe_load(playbook)
)
return r
@pytest.mark.parametrize('event', ['playbook_on_start',
'playbook_on_play_start',
'playbook_on_task_start', 'runner_on_ok',
'playbook_on_stats'])
@pytest.mark.parametrize('playbook', [
{'helloworld.yml': '''
- name: Hello World Sample
connection: local
hosts: all
gather_facts: no
tasks:
- name: Hello Message
debug:
msg: "Hello World!"
'''}, # noqa
{'results_included.yml': '''
- name: Run module which generates results list
connection: local
hosts: all
gather_facts: no
vars:
results: ['foo', 'bar']
tasks:
- name: Generate results list
debug:
var: results
'''} # noqa
], ids=['helloworld.yml', 'results_included.yml'])
@pytest.mark.parametrize('envvars', [
{'ANSIBLE_CALLBACK_PLUGINS': os.path.join(HERE, 'callback')},
{'ANSIBLE_CALLBACK_PLUGINS': ''}],
ids=['local-callback-plugin', 'no-callback-plugin']
)
def test_callback_plugin_receives_events(executor, event, playbook, envvars): # pylint: disable=W0613,W0621
executor.run()
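    # with or without the local callback plugin on the plugin path, every
    # parametrized lifecycle event must appear somewhere in the event stream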
assert list(executor.events)
assert event in [task['event'] for task in executor.events]
@pytest.mark.parametrize('playbook', [
{'no_log_on_ok.yml': '''
- name: args should not be logged when task-level no_log is set
connection: local
hosts: all
gather_facts: no
tasks:
- shell: echo "SENSITIVE"
no_log: true
'''}, # noqa
{'no_log_on_fail.yml': '''
- name: failed args should not be logged when task-level no_log is set
connection: local
hosts: all
gather_facts: no
tasks:
- shell: echo "SENSITIVE"
no_log: true
failed_when: true
ignore_errors: true
'''}, # noqa
{'no_log_on_skip.yml': '''
- name: skipped task args should be suppressed with no_log
connection: local
hosts: all
gather_facts: no
tasks:
- shell: echo "SENSITIVE"
no_log: true
when: false
'''}, # noqa
{'no_log_on_play.yml': '''
- name: args should not be logged when play-level no_log is set
connection: local
hosts: all
gather_facts: no
no_log: true
tasks:
- shell: echo "SENSITIVE"
'''}, # noqa
{'async_no_log.yml': '''
- name: async task args should be suppressed with no_log
connection: local
hosts: all
gather_facts: no
no_log: true
tasks:
- async: 10
poll: 1
shell: echo "SENSITIVE"
no_log: true
'''}, # noqa
{'with_items.yml': '''
- name: with_items tasks should be suppressed with no_log
connection: local
hosts: all
gather_facts: no
tasks:
- shell: echo {{ item }}
no_log: true
with_items: [ "SENSITIVE", "SENSITIVE-SKIPPED", "SENSITIVE-FAILED" ]
when: item != "SENSITIVE-SKIPPED"
failed_when: item == "SENSITIVE-FAILED"
ignore_errors: yes
'''}, # noqa, NOTE: with_items will be deprecated in 2.9
{'loop.yml': '''
- name: loop tasks should be suppressed with no_log
connection: local
hosts: all
gather_facts: no
tasks:
- shell: echo {{ item }}
no_log: true
loop: [ "SENSITIVE", "SENSITIVE-SKIPPED", "SENSITIVE-FAILED" ]
when: item != "SENSITIVE-SKIPPED"
failed_when: item == "SENSITIVE-FAILED"
ignore_errors: yes
'''}, # noqa
])
def test_callback_plugin_no_log_filters(executor, playbook): # pylint: disable=W0613,W0621
executor.run()
assert list(executor.events)
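    # serializing the whole event list catches leaks anywhere: in task args,
    # per-item results, and failure messages alike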
assert 'SENSITIVE' not in json.dumps(list(executor.events))
@pytest.mark.parametrize('playbook', [
{'no_log_on_ok.yml': '''
- name: args should not be logged when no_log is set at the task or module level
connection: local
hosts: all
gather_facts: no
tasks:
- shell: echo "PUBLIC"
- shell: echo "PRIVATE"
no_log: true
- uri: url=https://example.org url_username="PUBLIC" url_password="PRIVATE"
'''}, # noqa
])
def test_callback_plugin_task_args_leak(executor, playbook): # pylint: disable=W0613,W0621
executor.run()
events = list(executor.events)
assert events[0]['event'] == 'playbook_on_start'
assert events[1]['event'] == 'playbook_on_play_start'
# task 1
assert events[2]['event'] == 'playbook_on_task_start'
assert events[3]['event'] == 'runner_on_start'
assert events[4]['event'] == 'runner_on_ok'
# task 2 no_log=True
assert events[5]['event'] == 'playbook_on_task_start'
assert events[6]['event'] == 'runner_on_start'
assert events[7]['event'] == 'runner_on_ok'
assert 'PUBLIC' in json.dumps(events), events
for event in events:
assert 'PRIVATE' not in json.dumps(event), event
    # make sure the playbook ran successfully, so every task was actually hit
assert not events[-1]['event_data']['failures'], 'Unexpected playbook execution failure'
@pytest.mark.parametrize(
"playbook",
[
{
"simple.yml": """
- name: simpletask
connection: local
hosts: all
gather_facts: no
tasks:
- shell: echo "resolved actions test!"
"""
}, # noqa
],
)
def test_resolved_actions(executor, playbook, skipif_pre_ansible212): # pylint: disable=W0613,W0621
executor.run()
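    # ansible >= 2.12 (enforced by the skipif_pre_ansible212 fixture) reports
    # the fully-qualified resolved action name on the task start event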
events = list(executor.events)
# task 1
assert events[2]["event"] == "playbook_on_task_start"
assert "resolved_action" in events[2]["event_data"]
assert events[2]["event_data"]["resolved_action"] == "ansible.builtin.shell"
@pytest.mark.parametrize("playbook", [
{'loop_with_no_log.yml': '''
- name: playbook variable should not be overwritten when using no_log
connection: local
hosts: all
gather_facts: no
tasks:
- command: "{{ item }}"
register: command_register
no_log: True
with_items:
- "echo helloworld!"
- debug: msg="{{ command_register.results|map(attribute='stdout')|list }}"
'''}, # noqa
])
def test_callback_plugin_censoring_does_not_overwrite(executor, playbook): # pylint: disable=W0613,W0621
executor.run()
events = list(executor.events)
assert events[0]['event'] == 'playbook_on_start'
assert events[1]['event'] == 'playbook_on_play_start'
# task 1
assert events[2]['event'] == 'playbook_on_task_start'
# Ordering of task and item events may differ randomly
    assert {'runner_on_start', 'runner_item_on_ok', 'runner_on_ok'} == {data['event'] for data in events[3:6]}
# task 2 no_log=True
assert events[6]['event'] == 'playbook_on_task_start'
assert events[7]['event'] == 'runner_on_start'
assert events[8]['event'] == 'runner_on_ok'
assert 'helloworld!' in events[8]['event_data']['res']['msg']
@pytest.mark.parametrize('playbook', [
{'strip_env_vars.yml': '''
- name: sensitive environment variables should be stripped from events
connection: local
hosts: all
tasks:
- shell: echo "Hello, World!"
'''}, # noqa
])
def test_callback_plugin_strips_task_environ_variables(executor, playbook): # pylint: disable=W0613,W0621
executor.run()
assert list(executor.events)
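    # PATH is always present in the process environment, so its value acts as
    # a canary: if task environment variables leaked into the events, the
    # PATH value would show up in the serialized event data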
for event in list(executor.events):
assert os.environ['PATH'] not in json.dumps(event)
@pytest.mark.parametrize('playbook', [
{'custom_set_stat.yml': '''
- name: custom set_stats calls should persist to the local disk so awx can save them
connection: local
hosts: all
tasks:
- set_stats:
data:
foo: "bar"
'''}, # noqa
])
def test_callback_plugin_saves_custom_stats(executor, playbook): # pylint: disable=W0613,W0621
executor.run()
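    # scan the event stream for the persisted custom stats; the for/else
    # raises if no event carried ``artifact_data``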
for event in executor.events:
event_data = event.get('event_data', {})
if 'artifact_data' in event_data:
assert event_data['artifact_data'] == {'foo': 'bar'}
break
else:
raise Exception('Did not find expected artifact data in event data')
@pytest.mark.parametrize('playbook', [
{'handle_playbook_on_notify.yml': '''
- name: handle playbook_on_notify events properly
connection: local
hosts: all
handlers:
- name: my_handler
debug: msg="My Handler"
tasks:
- debug: msg="My Task"
changed_when: true
notify:
- my_handler
'''}, # noqa
])
def test_callback_plugin_records_notify_events(executor, playbook): # pylint: disable=W0613,W0621
executor.run()
assert list(executor.events)
notify_events = [x for x in executor.events if x['event'] == 'playbook_on_notify']
assert len(notify_events) == 1
assert notify_events[0]['event_data']['handler'] == 'my_handler'
assert notify_events[0]['event_data']['host'] == 'localhost'
assert notify_events[0]['event_data']['task'] == 'debug'
@pytest.mark.parametrize('playbook', [
{'no_log_module_with_var.yml': '''
- name: ensure that module-level secrets are redacted
connection: local
hosts: all
vars:
pw: SENSITIVE
tasks:
- uri:
url: https://example.org
url_username: john-jacob-jingleheimer-schmidt
url_password: "{{ pw }}"
'''}, # noqa
])
def test_module_level_no_log(executor, playbook): # pylint: disable=W0613,W0621
    # It's possible for `no_log=True` to be defined at the _module_ level,
    # e.g., for the uri module's url_password parameter. This test ensures
    # that those values are properly redacted.
executor.run()
assert list(executor.events)
assert 'john-jacob-jingleheimer-schmidt' in json.dumps(list(executor.events))
assert 'SENSITIVE' not in json.dumps(list(executor.events))
def test_output_when_given_invalid_playbook(tmp_path):
    # As shown in the following issue:
    #
    # https://github.com/ansible/ansible-runner/issues/29
    #
    # runner produced no output at all when a playbook that doesn't exist
    # was provided. This was fixed in this PR:
    #
    # https://github.com/ansible/ansible-runner/pull/34
    #
    # but no test validated the fix. This test does that.
private_data_dir = str(tmp_path)
ex = init_runner(
private_data_dir=private_data_dir,
inventory='localhost ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"',
envvars={"ANSIBLE_DEPRECATION_WARNINGS": "False"},
playbook=os.path.join(private_data_dir, 'fake_playbook.yml')
)
ex.run()
with ex.stdout as f:
stdout = f.read()
assert "ERROR! the playbook:" in stdout
assert "could not be found" in stdout
def test_output_when_given_non_playbook_script(tmp_path):
# As shown in the following pull request:
#
# https://github.com/ansible/ansible-runner/pull/256
#
# This ports some functionality that previously lived in awx and allows raw
# lines of stdout to be treated as event lines.
#
    # As mentioned in the pull request, no tests were added with it, so this
    # is a retroactive test based on the sample repo provided in the PR:
#
# https://github.com/AlanCoding/ansible-runner-examples/tree/master/non_playbook/sleep_with_writes
private_data_dir = str(tmp_path)
with open(os.path.join(private_data_dir, "args"), 'w') as args_file:
args_file.write("bash sleep_and_write.sh\n")
with open(os.path.join(private_data_dir, "sleep_and_write.sh"), 'w') as script_file:
script_file.write("echo 'hi world'\nsleep 0.5\necho 'goodbye world'\n")
    # Lower the pexpect timeout in the settings to make this test a bit faster :)
os.mkdir(os.path.join(private_data_dir, "env"))
with open(os.path.join(private_data_dir, "env", "settings"), 'w') as settings_file:
settings_file.write("pexpect_timeout: 0.2")
ex = init_runner(
private_data_dir=private_data_dir,
inventory='localhost ansible_connection=local ansible_python_interpreter="{{ ansible_playbook_python }}"',
envvars={"ANSIBLE_DEPRECATION_WARNINGS": "False"}
)
ex.run()
with ex.stdout as f:
stdout = f.readlines()
assert stdout[0].strip() == "hi world"
assert stdout[1].strip() == "goodbye world"
events = list(ex.events)
assert len(events) == 2
assert events[0]['event'] == 'verbose'
assert events[0]['stdout'] == 'hi world'
assert events[1]['event'] == 'verbose'
assert events[1]['stdout'] == 'goodbye world'
@pytest.mark.parametrize('playbook', [
{'listvars.yml': '''
- name: List Variables
connection: local
hosts: localhost
gather_facts: false
tasks:
- name: Print a lot of lines
debug:
msg: "{{ ('F' * 150) | list }}"
'''}, # noqa
])
def test_large_stdout_parsing_when_using_json_output(executor, playbook): # pylint: disable=W0613,W0621
    # When the json flag is used, a single event can produce more output than
    # pexpect's default maxread of 2000 characters. If that is not handled
    # properly, the stdout ends up corrupted by partial non-event matches,
    # with raw non-json lines intermixed with the json ones.
    #
    # This test confirms we don't pollute the stdout output with non-json
    # lines when a single event has a lot of output.
executor.config.env['ANSIBLE_NOCOLOR'] = str(True)
executor.run()
with executor.stdout as f:
text = f.read()
assert text.count('"F"') == 150
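    # a stricter check one could add here, sketched under the assumption that
    # event lines are newline-delimited json when the json flag is set:
    #
    #     for line in text.splitlines():
    #         if line.strip():
    #             json.loads(line)  # a partial/raw line would fail to parse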