File: mixer.py

'''
Simple mixer element, accepts 320 x 240 RGBA at 30 fps
on any number of sinkpads.

Requires PIL (Python Imaging Library); the Pillow fork works as well.

Example pipeline:

gst-launch-1.0 py_videomixer name=mixer ! videoconvert ! autovideosink \
        videotestsrc ! mixer. \
        videotestsrc pattern=ball ! mixer. \
        videotestsrc pattern=snow ! mixer.
'''

import gi

gi.require_version('Gst', '1.0')
gi.require_version('GstBase', '1.0')
gi.require_version('GObject', '2.0')
from gi.repository import Gst, GObject, GstBase

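# Gst.init_python() is intended for code loaded as a Python plugin: the host
# application has already called Gst.init(), so only the Python side needs
# initializing here.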
Gst.init_python()

try:
    from PIL import Image
except ImportError:
    Gst.error('py_videomixer requires PIL')
    raise

# Completely fixed input / output
ICAPS = Gst.Caps(Gst.Structure('video/x-raw',
                               format='RGBA',
                               width=320,
                               height=240,
                               framerate=Gst.Fraction(30, 1)))

OCAPS = Gst.Caps(Gst.Structure('video/x-raw',
                               format='RGBA',
                               width=320,
                               height=240,
                               framerate=Gst.Fraction(30, 1)))


class BlendData:
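    """Mutable state shared across one aggregate cycle: the running composite
    image, the timestamp of the newest mixed buffer, and an EOS flag that
    stays True only if no sink pad produced a buffer."""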
    def __init__(self, outimg):
        self.outimg = outimg
        self.pts = 0
        self.eos = True


class Videomixer(GstBase.Aggregator):
    __gstmetadata__ = ('Videomixer', 'Video/Mixer',
                       'Python video mixer', 'Mathieu Duponchelle')

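    # One request sink pad template per input stream plus a single always
    # source pad, all restricted to the fixed RGBA caps defined above.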
    __gsttemplates__ = (
        Gst.PadTemplate.new_with_gtype("sink_%u",
                                       Gst.PadDirection.SINK,
                                       Gst.PadPresence.REQUEST,
                                       ICAPS,
                                       GstBase.AggregatorPad.__gtype__),
        Gst.PadTemplate.new_with_gtype("src",
                                       Gst.PadDirection.SRC,
                                       Gst.PadPresence.ALWAYS,
                                       OCAPS,
                                       GstBase.AggregatorPad.__gtype__)
    )

    def mix_buffers(self, agg, pad, bdata):
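        """Pop one buffer from `pad`, alpha-blend it into bdata.outimg and
        record its timestamp. Called once per sink pad from do_aggregate()."""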
        buf = pad.pop_buffer()
        # Nothing queued on this pad (e.g. it already reached EOS): skip it,
        # leaving bdata.eos untouched, and keep iterating the other pads.
        if buf is None:
            return True

        _, info = buf.map(Gst.MapFlags.READ)

        img = Image.frombuffer('RGBA', (320, 240), info.data, "raw", 'RGBA', 0, 1)

        bdata.outimg = Image.blend(bdata.outimg, img, alpha=0.5)
        bdata.pts = buf.pts

        # Need to ensure the PIL image has been released, or unmap will fail
        # with an outstanding memoryview buffer error
        del img

        buf.unmap(info)

        bdata.eos = False

        return True

    def do_aggregate(self, timeout):
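        """GstAggregator vfunc: blend whatever each sink pad has queued into a
        single RGBA frame and push it downstream on the source pad."""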
        outimg = Image.new('RGBA', (320, 240), 0x00000000)

        bdata = BlendData(outimg)

        self.foreach_sink_pad(self.mix_buffers, bdata)

        data = bdata.outimg.tobytes()

        outbuf = Gst.Buffer.new_allocate(None, len(data), None)
        outbuf.fill(0, data)
        outbuf.pts = bdata.pts
        self.finish_buffer(outbuf)

        # We are EOS when no pad had a buffer ready to be aggregated;
        # this would obviously not work for live sources
        if bdata.eos:
            return Gst.FlowReturn.EOS

        return Gst.FlowReturn.OK


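# Register the type with GObject and expose it to the gst-python plugin
# loader, which reads __gstelementfactory__ to create the "py_videomixer"
# element factory.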
GObject.type_register(Videomixer)
__gstelementfactory__ = ("py_videomixer", Gst.Rank.NONE, Videomixer)
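
# To try the element, this file typically needs to be discoverable by the
# gst-python plugin loader, e.g. placed in a "python" subdirectory of a path
# listed in GST_PLUGIN_PATH (the exact layout depends on the installation);
# the gst-launch-1.0 pipeline in the module docstring can then be run as-is.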