File: pyi_multiprocessing_nested_process.py

#-----------------------------------------------------------------------------
# Copyright (c) 2023, PyInstaller Development Team.
#
# Distributed under the terms of the GNU General Public License (version 2
# or later) with exception for distributing the bootloader.
#
# The full license is in the file COPYING.txt, distributed with this software.
#
# SPDX-License-Identifier: (GPL-2.0-or-later WITH Bootloader-exception)
#-----------------------------------------------------------------------------

# Test that we can start a nested `multiprocessing.Process` from within a `multiprocessing.Process`. See #7494.

import sys
import multiprocessing


def nested_process_function(queue):
    print("Running nested sub-process!")
    queue.put(2)


def process_function(queue):
    print("Running sub-process!")
    queue.put(1)

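    # Spawn a nested (grandchild) process from within this worker process; this is the scenario the test exercises.
    # With the `spawn` start method, a frozen application has to re-launch its own executable to create each new
    # process, so a nested spawn re-enters the frozen bootstrap a second time.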
    process = multiprocessing.Process(target=nested_process_function, args=(queue,))
    process.start()
    process.join()


def main(start_method):
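    # Valid start methods are 'fork', 'spawn', and 'forkserver', depending on the platform. Note that
    # `set_start_method()` may be called at most once per process, which is fine here because `main()` runs only once.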
    multiprocessing.set_start_method(start_method)
    queue = multiprocessing.Queue()

    process = multiprocessing.Process(target=process_function, args=(queue,))
    process.start()
    process.join()

    # Read the results from the queue; we expect one from each process.
    # NOTE: this goes against the `multiprocessing` programming recommendations, which say to read from the queue
    # before joining the feeding processes, lest we incur a deadlock once the queue's buffer fills up. However, as we
    # put in only two elements, we take a calculated risk; this way, we read only the items that are actually
    # available post-hoc, which in turn allows us to avoid blocking forever if a process happens to fail for some
    # reason.
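    # NOTE: `queue.empty()` is documented as unreliable in general; it should be safe here because both producer
    # processes have already been joined, so their feeder threads have flushed all enqueued items into the pipe.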
    results = []
    while not queue.empty():
        results.append(queue.get())

    # NOTE: as per [1]: "If multiple processes are enqueuing objects, it is possible for the objects to be received at
    # the other end out-of-order. However, objects enqueued by the same process will always be in the expected order
    # with respect to each other."
    # [1] https://docs.python.org/3/library/multiprocessing.html#pipes-and-queues
    #
    # Therefore, accept either order of results as valid.
    print(f"Results: {results}")
    assert results == [1, 2] or results == [2, 1]


if __name__ == '__main__':
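    # In a frozen application, `freeze_support()` must be the first statement under the `__main__` guard: when
    # `multiprocessing` re-launches the frozen executable to start a new process, this call diverts execution into the
    # worker bootstrap instead of re-running the program from the top.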
    multiprocessing.freeze_support()

    if len(sys.argv) != 2:
        print(f"Usage: {sys.argv[0]} <start-method>")
        sys.exit(1)

    main(sys.argv[1])
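
# For reference, this test might be frozen and exercised along these lines (the paths assume PyInstaller's default
# one-file output layout, and the start-method argument must be one supported on the platform):
#
#   pyinstaller --onefile pyi_multiprocessing_nested_process.py
#   ./dist/pyi_multiprocessing_nested_process spawn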