File: jquant-mmx.asm

;
; jquant-mmx.asm - sample data conversion and quantization (MMX)
;
; Copyright 2009 Pierre Ossman <ossman@cendio.se> for Cendio AB
;
; Based on the x86 SIMD extension for IJG JPEG library
; Copyright (C) 1999-2006, MIYASAKA Masaru.
; For conditions of distribution and use, see copyright notice in jsimdext.inc
;
; This file should be assembled with NASM (Netwide Assembler) and
; can *not* be assembled with Microsoft's MASM or any MASM-compatible
; assembler (including Borland's Turbo Assembler).
; NASM is available from http://nasm.sourceforge.net/ or
; http://sourceforge.net/project/showfiles.php?group_id=6208
;
; [TAB8]

%include "jsimdext.inc"
%include "jdct.inc"

; --------------------------------------------------------------------------
        SECTION SEG_TEXT
        BITS    32
;
; Load data into workspace, applying unsigned->signed conversion
;
; GLOBAL(void)
; jsimd_convsamp_mmx (JSAMPARRAY sample_data, JDIMENSION start_col,
;                     DCTELEM *workspace);
;
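; A minimal scalar C sketch of the same conversion, for reference (not
; library code; it assumes 8-bit JSAMPLEs with CENTERJSAMPLE == 128, and
; convsamp_ref is an illustrative name):
;
;   void convsamp_ref (JSAMPARRAY sample_data, JDIMENSION start_col,
;                      DCTELEM *workspace)
;   {
;     int row, col;
;     for (row = 0; row < DCTSIZE; row++) {
;       JSAMPROW elem = sample_data[row] + start_col;   /* row pointer */
;       for (col = 0; col < DCTSIZE; col++)
;         *workspace++ = (DCTELEM) *elem++ - CENTERJSAMPLE;
;     }
;   }
;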

%define sample_data     ebp+8           ; JSAMPARRAY sample_data
%define start_col       ebp+12          ; JDIMENSION start_col
%define workspace       ebp+16          ; DCTELEM *workspace

        align   16
        global  EXTN(jsimd_convsamp_mmx)

EXTN(jsimd_convsamp_mmx):
        push    ebp
        mov     ebp,esp
        push    ebx
;       push    ecx             ; need not be preserved
;       push    edx             ; need not be preserved
        push    esi
        push    edi

        pxor    mm6,mm6                 ; mm6=(all 0's)
        pcmpeqw mm7,mm7
        psllw   mm7,7                   ; mm7={0xFF80 0xFF80 0xFF80 0xFF80}
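                                        ; 0xFF80 is -128 as a signed word, so
                                        ; the paddw's below subtract
                                        ; CENTERJSAMPLE (128) from each
                                        ; zero-extended sample, mapping the
                                        ; unsigned range 0..255 onto the
                                        ; signed range -128..127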

        mov     esi, JSAMPARRAY [sample_data]   ; (JSAMPROW *)
        mov     eax, JDIMENSION [start_col]
        mov     edi, POINTER [workspace]        ; (DCTELEM *)
        mov     ecx, DCTSIZE/4          ; four rows per iteration, two passes
        alignx  16,7
.convloop:
        mov     ebx, JSAMPROW [esi+0*SIZEOF_JSAMPROW]   ; (JSAMPLE *)
        mov     edx, JSAMPROW [esi+1*SIZEOF_JSAMPROW]   ; (JSAMPLE *)

        movq    mm0, MMWORD [ebx+eax*SIZEOF_JSAMPLE]    ; mm0=(01234567)
        movq    mm1, MMWORD [edx+eax*SIZEOF_JSAMPLE]    ; mm1=(89ABCDEF)

        mov     ebx, JSAMPROW [esi+2*SIZEOF_JSAMPROW]   ; (JSAMPLE *)
        mov     edx, JSAMPROW [esi+3*SIZEOF_JSAMPROW]   ; (JSAMPLE *)

        movq    mm2, MMWORD [ebx+eax*SIZEOF_JSAMPLE]    ; mm2=(GHIJKLMN)
        movq    mm3, MMWORD [edx+eax*SIZEOF_JSAMPLE]    ; mm3=(OPQRSTUV)

        movq      mm4,mm0
        punpcklbw mm0,mm6               ; mm0=(0123)
        punpckhbw mm4,mm6               ; mm4=(4567)
        movq      mm5,mm1
        punpcklbw mm1,mm6               ; mm1=(89AB)
        punpckhbw mm5,mm6               ; mm5=(CDEF)

        paddw   mm0,mm7
        paddw   mm4,mm7
        paddw   mm1,mm7
        paddw   mm5,mm7

        movq    MMWORD [MMBLOCK(0,0,edi,SIZEOF_DCTELEM)], mm0
        movq    MMWORD [MMBLOCK(0,1,edi,SIZEOF_DCTELEM)], mm4
        movq    MMWORD [MMBLOCK(1,0,edi,SIZEOF_DCTELEM)], mm1
        movq    MMWORD [MMBLOCK(1,1,edi,SIZEOF_DCTELEM)], mm5

        movq      mm0,mm2
        punpcklbw mm2,mm6               ; mm2=(GHIJ)
        punpckhbw mm0,mm6               ; mm0=(KLMN)
        movq      mm4,mm3
        punpcklbw mm3,mm6               ; mm3=(OPQR)
        punpckhbw mm4,mm6               ; mm4=(STUV)

        paddw   mm2,mm7
        paddw   mm0,mm7
        paddw   mm3,mm7
        paddw   mm4,mm7

        movq    MMWORD [MMBLOCK(2,0,edi,SIZEOF_DCTELEM)], mm2
        movq    MMWORD [MMBLOCK(2,1,edi,SIZEOF_DCTELEM)], mm0
        movq    MMWORD [MMBLOCK(3,0,edi,SIZEOF_DCTELEM)], mm3
        movq    MMWORD [MMBLOCK(3,1,edi,SIZEOF_DCTELEM)], mm4

        add     esi, byte 4*SIZEOF_JSAMPROW
        add     edi, byte 4*DCTSIZE*SIZEOF_DCTELEM
        dec     ecx
        jnz     short .convloop

        emms            ; empty MMX state
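                        ; (required before any later x87 floating-point use,
                        ; since the MMX registers alias the x87 stack)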

        pop     edi
        pop     esi
;       pop     edx             ; need not be preserved
;       pop     ecx             ; need not be preserved
        pop     ebx
        pop     ebp
        ret

; --------------------------------------------------------------------------
;
; Quantize/descale the coefficients, and store into coef_block
;
; This implementation is based on an algorithm described in
;   "How to optimize for the Pentium family of microprocessors"
;   (http://www.agner.org/assem/).
;
; GLOBAL(void)
; jsimd_quantize_mmx (JCOEFPTR coef_block, DCTELEM *divisors,
;                     DCTELEM *workspace);
;

%define RECIPROCAL(m,n,b) MMBLOCK(DCTSIZE*0+(m),(n),(b),SIZEOF_DCTELEM)
%define CORRECTION(m,n,b) MMBLOCK(DCTSIZE*1+(m),(n),(b),SIZEOF_DCTELEM)
%define SCALE(m,n,b)      MMBLOCK(DCTSIZE*2+(m),(n),(b),SIZEOF_DCTELEM)
%define SHIFT(m,n,b)      MMBLOCK(DCTSIZE*3+(m),(n),(b),SIZEOF_DCTELEM)
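
; The divisors pointer addresses four consecutive DCTSIZE2-word sections,
; selected by the macros above.  As a rough scalar model of what one loop
; iteration below computes for each coefficient (a sketch for reference
; only, not library code; quantize_ref is an illustrative name and DCTELEM
; is assumed to be 16 bits wide):
;
;   DCTELEM quantize_ref (DCTELEM x, const DCTELEM *d, int k)
;   {
;     int sign = x >> 15;                        /* -1 if x < 0, else 0 */
;     unsigned long t = (unsigned short) ((x ^ sign) - sign);  /* abs(x) */
;     t = (unsigned short) (t + (unsigned short) d[DCTSIZE2*1 + k]);
;                                                         /* + CORRECTION */
;     t = (t * (unsigned short) d[DCTSIZE2*0 + k]) >> 16;  /* * RECIPROCAL */
;     t = (t * (unsigned short) d[DCTSIZE2*2 + k]) >> 16;  /* * SCALE */
;     return (DCTELEM) (((int) t ^ sign) - sign);       /* restore sign */
;   }
;
; (The SHIFT section is not referenced by this routine; the final descale
; shift has been folded into the SCALE multiplier, as the comments inside
; the loop explain.)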

%define coef_block      ebp+8           ; JCOEFPTR coef_block
%define divisors        ebp+12          ; DCTELEM *divisors
%define workspace       ebp+16          ; DCTELEM *workspace

        align   16
        global  EXTN(jsimd_quantize_mmx)

EXTN(jsimd_quantize_mmx):
        push    ebp
        mov     ebp,esp
;       push    ebx             ; unused
;       push    ecx             ; unused
;       push    edx             ; need not be preserved
        push    esi
        push    edi

        mov     esi, POINTER [workspace]
        mov     edx, POINTER [divisors]
        mov     edi, JCOEFPTR [coef_block]
        mov     ah, 2                   ; ah = outer loop counter
        alignx  16,7
.quantloop1:
        mov     al, DCTSIZE2/8/2        ; al = inner loop counter
        alignx  16,7
.quantloop2:
        movq    mm2, MMWORD [MMBLOCK(0,0,esi,SIZEOF_DCTELEM)]
        movq    mm3, MMWORD [MMBLOCK(0,1,esi,SIZEOF_DCTELEM)]

        movq    mm0,mm2
        movq    mm1,mm3

        psraw   mm2,(WORD_BIT-1)  ; -1 if value < 0, 0 otherwise
        psraw   mm3,(WORD_BIT-1)

        pxor    mm0,mm2   ; val = -val where val was negative:
        pxor    mm1,mm3   ; (x ^ mask) - mask == abs(x)
        psubw   mm0,mm2   ; when mask = x >> 15
        psubw   mm1,mm3

        ;
        ; MMX is an annoyingly crappy instruction set. It has two
        ; misfeatures that are causing problems here:
        ;
        ; - All multiplications are signed.
        ;
        ; - The second operand for the shifts is not treated as packed.
        ;
        ;
        ; We work around the first problem by implementing this algorithm:
        ;
        ; unsigned long unsigned_multiply(unsigned short x, unsigned short y)
        ; {
        ;   enum { SHORT_BIT = 16 };
        ;   signed short sx = (signed short) x;
        ;   signed short sy = (signed short) y;
        ;   signed long sz;
        ;
        ;   sz = (long) sx * (long) sy;     /* signed multiply */
        ;
        ;   if (sx < 0) sz += (long) sy << SHORT_BIT;
        ;   if (sy < 0) sz += (long) sx << SHORT_BIT;
        ;
        ;   return (unsigned long) sz;
        ; }
        ;
        ; (note that a negative sx adds _sy_ and vice versa)
        ;
        ; For the second problem, we replace the shift by a multiplication.
        ; Unfortunately that means we have to deal with the signed issue again.
        ;
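; In MMX terms, the two conditional additions above collapse into a
; branch-free mask-and-add: psraw by WORD_BIT-1 smears the sign bit into
; an all-ones/all-zeros mask, pand selects the addend, and paddw applies
; it.  A compact C rendering of the same fix-up (sketch only):
;
;   sz  = (long) sx * (long) sy;       /* what pmulhw computes, bits 16-31 */
;   sz += (long) ((sx >> 15) & sy) << SHORT_BIT;  /* psraw / pand / paddw */
;   sz += (long) ((sy >> 15) & sx) << SHORT_BIT;
;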

        paddw   mm0, MMWORD [CORRECTION(0,0,edx)]   ; correction + roundfactor
        paddw   mm1, MMWORD [CORRECTION(0,1,edx)]

        movq    mm4,mm0   ; store current value for later
        movq    mm5,mm1
        pmulhw  mm0, MMWORD [RECIPROCAL(0,0,edx)]   ; reciprocal
        pmulhw  mm1, MMWORD [RECIPROCAL(0,1,edx)]
        paddw   mm0,mm4                 ; reciprocal is always negative (MSB=1),
        paddw   mm1,mm5                 ; so we always need to add the initial value
                                        ; (input value is never negative, as we
                                        ; inverted it at the start of this routine)

        ; here it gets a bit tricky as both scale
        ; and mm0/mm1 can be negative
        movq    mm6, MMWORD [SCALE(0,0,edx)]    ; scale
        movq    mm7, MMWORD [SCALE(0,1,edx)]
        movq    mm4,mm0
        movq    mm5,mm1
        pmulhw  mm0,mm6
        pmulhw  mm1,mm7

        psraw   mm6,(WORD_BIT-1)    ; determine if scale is negative
        psraw   mm7,(WORD_BIT-1)

        pand    mm6,mm4             ; and add input if it is
        pand    mm7,mm5
        paddw   mm0,mm6
        paddw   mm1,mm7

        psraw   mm4,(WORD_BIT-1)    ; then check if negative input
        psraw   mm5,(WORD_BIT-1)

        pand    mm4, MMWORD [SCALE(0,0,edx)]    ; and add scale if it is
        pand    mm5, MMWORD [SCALE(0,1,edx)]
        paddw   mm0,mm4
        paddw   mm1,mm5

        pxor    mm0,mm2   ; val = -val where the input was negative
        pxor    mm1,mm3   ; (undo the abs() from the start of the loop)
        psubw   mm0,mm2
        psubw   mm1,mm3

        movq    MMWORD [MMBLOCK(0,0,edi,SIZEOF_DCTELEM)], mm0
        movq    MMWORD [MMBLOCK(0,1,edi,SIZEOF_DCTELEM)], mm1

        add     esi, byte 8*SIZEOF_DCTELEM
        add     edx, byte 8*SIZEOF_DCTELEM
        add     edi, byte 8*SIZEOF_JCOEF
        dec     al
        jnz     near .quantloop2
        dec     ah
        jnz     near .quantloop1        ; to avoid branch misprediction

        emms            ; empty MMX state

        pop     edi
        pop     esi
;       pop     edx             ; need not be preserved
;       pop     ecx             ; unused
;       pop     ebx             ; unused
        pop     ebp
        ret

; For some reason, the OS X linker does not honor the request to align the
; segment unless we do this.
        align   16