/* Copyright (C) 2001-2021 Artifex Software, Inc.
   All Rights Reserved.

   This software is provided AS-IS with no warranty, either express or
   implied.

   This software is distributed under license and may not be copied,
   modified or distributed except as expressly authorized under the terms
   of the license contained in the file LICENSE in this distribution.

   Refer to licensing information at http://www.artifex.com or contact
   Artifex Software, Inc., 1305 Grant Avenue - Suite 200, Novato,
   CA 94945, U.S.A., +1(415)492-9861, for further information.
*/
#include <assert.h>
/* Oversampled bitmap compression */
#include "std.h"
#include "gstypes.h"
#include "gdebug.h"
#include "gsbitops.h" /* for prototype */
/*
 * Define a compile-time option to reverse nibble order in alpha maps.
 * Note that this does not reverse bit order within nibbles.
 * This option is here for a very specialized purpose and does not
 * interact well with the rest of the code.
 */
#ifndef ALPHA_LSB_FIRST
# define ALPHA_LSB_FIRST 0
#endif
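/*
 * Example: with ALPHA_LSB_FIRST set to 1 and 4-bit output, the first pixel
 * of each output row lands in the low nibble of the first destination byte
 * rather than the high nibble (see out_shift_initial / out_shift_update
 * below).
 */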
/* Count the number of 1-bits in a half-byte. */
static const byte half_byte_1s[16] = {
    0, 1, 1, 2, 1, 2, 2, 3, 1, 2, 2, 3, 2, 3, 3, 4
};
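/* Example: half_byte_1s[0x6] == 2, since binary 0110 contains two 1-bits. */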
/* Count the number of trailing 1s in an up-to-5-bit value, minus 1. */
static const byte bits5_trailing_1s[32] = {
    0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 3,
    0, 0, 0, 1, 0, 0, 0, 2, 0, 0, 0, 1, 0, 0, 0, 4
};

/* Count the number of leading 1s in an up-to-5-bit value, minus 1. */
static const byte bits5_leading_1s[32] = {
    0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0,
    0, 0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 3, 4
};
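/*
 * Example: bits5_trailing_1s[0x17] == 2, since binary 10111 ends in three
 * 1-bits (3 - 1 == 2); likewise bits5_leading_1s[0x1c] == 2, since binary
 * 11100 begins with three 1-bits.
 */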
/*
 * Compress a value in the range 0 .. 2^M (inclusive) to a value in the
 * range 0 .. 2^N - 1.
 * Possible values of M are 1, 2, 3, or 4; of N, 1, 2, and 4.
 * The name of each table is compress_M_N.
 * As noted below, we require that N <= M.
 */
static const byte compress_1_1[3] = {
    0, 1, 1
};
static const byte compress_2_1[5] = {
    0, 0, 1, 1, 1
};
static const byte compress_2_2[5] = {
    0, 1, 2, 2, 3
};
static const byte compress_3_1[9] = {
    0, 0, 0, 0, 1, 1, 1, 1, 1
};
static const byte compress_3_2[9] = {
    0, 0, 1, 1, 2, 2, 2, 3, 3
};
static const byte compress_4_1[17] = {
    0, 0, 0, 0, 0, 0, 0, 1, 1, 1, 1, 1, 1, 1, 1, 1, 1
};
static const byte compress_4_2[17] = {
    0, 0, 0, 0, 1, 1, 1, 1, 2, 2, 2, 2, 2, 3, 3, 3, 3
};
static const byte compress_4_4[17] = {
    0, 1, 2, 3, 4, 5, 6, 7, 8, 8, 9, 10, 11, 12, 13, 14, 15
};
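/*
 * Example: compress_4_2[9] == 2, i.e. a cell in which 9 of 16 oversampled
 * bits are set compresses to alpha value 2 on the 2-bit (0..3) scale.
 */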
/* The table of tables is indexed by log2(N) and then by M-1. */
static const byte *const compress_tables[4][4] = {
    {compress_1_1, compress_2_1, compress_3_1, compress_4_1},
    {0, compress_2_2, compress_3_2, compress_4_2},
    {0, 0, 0, compress_4_4}
};
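/*
 * Example: for 4x4 oversampling (log2_x == log2_y == 2, so M == 4) and a
 * 2-bit result (log2_out_bits == 1), the lookup in bits_compress_scaled
 * below selects compress_tables[1][3], i.e. compress_4_2.
 */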
/*
 * Compress an XxY-oversampled bitmap to Nx1 by counting 1-bits. The X and
 * Y oversampling factors are 1, 2, or 4, but may be different. N, the
 * resulting number of (alpha) bits per pixel, may be 1, 2, or 4; we allow
 * compression in place, in which case N must not exceed the X oversampling
 * factor. Width and height are the source dimensions, and hence reflect
 * the oversampling; both are multiples of the relevant scale factor. The
 * same is true for srcx.
 */
void
bits_compress_scaled(const byte * src, int srcx, uint width, uint height,
                     uint sraster, byte * dest, uint draster,
                     const gs_log2_scale_point *plog2_scale, int log2_out_bits)
{
    int log2_x = plog2_scale->x, log2_y = plog2_scale->y;
    int xscale = 1 << log2_x;
    int yscale = 1 << log2_y;
    int out_bits = 1 << log2_out_bits;
    /*
     * The following two initializations are only needed (and the variables
     * are only used) if out_bits <= xscale. We do them in all cases only
     * to suppress bogus "possibly uninitialized variable" warnings from
     * certain compilers.
     */
    int input_byte_out_bits = out_bits << (3 - log2_x);
    byte input_byte_out_mask = (1 << input_byte_out_bits) - 1;
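    /*
     * Example: with xscale == 4 (log2_x == 2) and out_bits == 2, one input
     * byte spans 8 / 4 == 2 output pixels, so it produces
     * input_byte_out_bits == 2 << (3 - 2) == 4 output bits, and
     * input_byte_out_mask == 0xf.
     */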
    const byte *table =
        compress_tables[log2_out_bits][log2_x + log2_y - 1];
    uint sskip = sraster << log2_y;
    uint dwidth = (width >> log2_x) << log2_out_bits;
    uint dskip = draster - ((dwidth + 7) >> 3);
    uint mask = (1 << xscale) - 1;
    uint count_max = 1 << (log2_x + log2_y);
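    /*
     * Example: for width == 64 with log2_x == log2_y == 2 and
     * log2_out_bits == 1, dwidth == (64 >> 2) << 1 == 32 output bits
     * (4 bytes per destination row), mask == 0xf selects one xscale-wide
     * group of source bits, and count_max == 16 is the number of source
     * bits that feed one output pixel.
     */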
    /*
     * For the moment, we don't attempt to take advantage of the fact
     * that the input is aligned.
     */
    const byte *srow = src + (srcx >> 3);
    int in_shift_initial = 8 - xscale - (srcx & 7);
    int in_shift_check = (out_bits <= xscale ? 8 - xscale : -1);
    byte *d = dest;
    uint h;

    /* Assert some preconditions are satisfied: */
    /* log2_x and log2_y must each be 0, 1 or 2. */
    assert(log2_x >= 0 && log2_x < 3);
    assert(log2_y >= 0 && log2_y < 3);
    /* srcx and width must be multiples of xscale. */
    assert(srcx % xscale == 0);
    assert(width % xscale == 0);
    /* height must be a multiple of yscale. */
    assert(height % yscale == 0);
    /* Because xscale is 1, 2 or 4 and srcx is a multiple of xscale,
       in_shift_initial ends up being constrained as follows: */
    if (log2_x == 0) {
        /* in_shift_initial is in {0,1,2,3,4,5,6,7}. */
        assert(in_shift_initial >= 0 && in_shift_initial < 8);
    }
    if (log2_x == 1) {
        /* in_shift_initial is in {0,2,4,6}. */
        assert(in_shift_initial >= 0 && in_shift_initial < 7 && in_shift_initial % 2 == 0);
    }
    if (log2_x == 2) {
        /* in_shift_initial is in {0,4}. */
        assert(in_shift_initial == 0 || in_shift_initial == 4);
    }
    for (h = height; h; srow += sskip, h -= yscale) {
        const byte *s = srow;

#if ALPHA_LSB_FIRST
# define out_shift_initial 0
# define out_shift_update(out_shift, nbits) ((out_shift += (nbits)) >= 8)
#else
# define out_shift_initial (8 - out_bits)
# define out_shift_update(out_shift, nbits) ((out_shift -= (nbits)) < 0)
#endif
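        /*
         * out_shift is the bit position within 'out' at which the next
         * output pixel will be deposited.  In the default (MSB-first) case
         * it starts at 8 - out_bits and decreases; with ALPHA_LSB_FIRST it
         * starts at 0 and increases.  out_shift_update() advances it by
         * nbits and returns nonzero when 'out' is full and must be flushed.
         */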
        int out_shift = out_shift_initial;
        byte out = 0;
        int in_shift = in_shift_initial;
        int dw = 8 - (srcx & 7);
        int w;

        /* Loop over source bytes. */
        for (w = width; w > 0; w -= dw, dw = 8) {
            int index;
            int in_shift_final = (w >= dw ? 0 : dw - w);

            /*
             * Check quickly for all-0s or all-1s, but only if each
             * input byte generates no more than one output byte,
             * we're at an input byte boundary, and we're processing
             * an entire input byte (i.e., this isn't a final
             * partial byte.)
             */
            if (in_shift == in_shift_check && in_shift_final == 0)
                switch (*s) {
                    case 0:
                        for (index = sraster; index != sskip; index += sraster)
                            if (s[index] != 0)
                                goto p;
                        if (out_shift_update(out_shift, input_byte_out_bits))
                            *d++ = out, out_shift &= 7, out = 0;
                        s++;
                        continue;
#if !ALPHA_LSB_FIRST            /* too messy to make it work */
                    case 0xff:
                        for (index = sraster; index != sskip; index += sraster)
                            if (s[index] != 0xff)
                                goto p;
                        {
                            int shift =
                                (out_shift -= input_byte_out_bits) + out_bits;

                            if (shift > 0)
                                out |= input_byte_out_mask << shift;
                            else {
                                out |= input_byte_out_mask >> -shift;
                                *d++ = out;
                                out_shift += 8;
                                out = input_byte_out_mask << (8 + shift);
                            }
                        }
                        s++;
                        continue;
#endif
                    default:
                        ;
                }
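            /*
             * Each output pixel is derived from an xscale x yscale cell of
             * source bits.  In the loop below, if a cell is not empty but
             * its bit count still compresses to 0, coverage from adjacent
             * cells is borrowed (see the "dropouts" comment) so that thin
             * features are less likely to disappear completely.
             */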
          p:                    /* Loop over source pixels within a byte. */
            do {
                uint count;

                for (index = 0, count = 0; index != sskip;
                     index += sraster
                    ) {
                    /* Coverity 94484 incorrectly thinks in_shift can be negative. */
                    /* coverity[negative_shift] */
                    count += half_byte_1s[(s[index] >> in_shift) & mask];
                }
                if (count != 0 && table[count] == 0) {
                    /* Look at adjacent cells to help prevent dropouts. */
                    uint orig_count = count;
                    uint shifted_mask = mask << in_shift;
                    byte in;

                    if_debug3('B', "[B]count(%d,%d)=%d\n",
                              (width - w) / xscale,
                              (height - h) / yscale, count);
                    if (yscale > 1) {
                        /* Look at the next "lower" cell. */
                        if (h < height && (in = s[0] & shifted_mask) != 0) {
                            uint lower;

                            for (index = 0, lower = 0;
                                 -(index -= sraster) <= sskip &&
                                     (in &= s[index]) != 0;
                                )
                                lower += half_byte_1s[in >> in_shift];
                            if_debug1('B', "[B] lower adds %d\n", lower);
                            if (lower <= orig_count)
                                count += lower;
                        }
                        /* Look at the next "higher" cell. */
                        if (h > yscale && (in = s[sskip - sraster] & shifted_mask) != 0) {
                            uint upper;

                            for (index = sskip, upper = 0;
                                 index < sskip << 1 &&
                                     (in &= s[index]) != 0;
                                 index += sraster
                                )
                                upper += half_byte_1s[in >> in_shift];
                            if_debug1('B', "[B] upper adds %d\n", upper);
                            if (upper < orig_count)
                                count += upper;
                        }
                    }
                    if (xscale > 1) {
                        uint mask1 = (mask << 1) + 1;

                        /* Look at the next cell to the left. */
                        if (w < width) {
                            int lshift = in_shift + xscale - 1;
                            uint left;

                            for (index = 0, left = 0;
                                 index < sskip; index += sraster
                                ) {
                                uint bits =
                                    ((s[index - 1] << 8) + s[index]) >> lshift;

                                left += bits5_trailing_1s[bits & mask1];
                            }
                            if_debug1('B', "[B] left adds %d\n", left);
                            if (left < orig_count)
                                count += left;
                        }
                        /* Look at the next cell to the right. */
                        if (w > xscale) {
                            int rshift = in_shift - xscale + 8;
                            uint right;

                            for (index = 0, right = 0;
                                 index < sskip; index += sraster
                                ) {
                                uint bits =
                                    ((s[index] << 8) + s[index + 1]) >> rshift;

                                right += bits5_leading_1s[(bits & mask1) << (4 - xscale)];
                            }
                            if_debug1('B', "[B] right adds %d\n", right);
                            if (right <= orig_count)
                                count += right;
                        }
                    }
                    if (count > count_max)
                        count = count_max;
                }
                out += table[count] << out_shift;
                if (out_shift_update(out_shift, out_bits))
                    *d++ = out, out_shift &= 7, out = 0;
            }
            while ((in_shift -= xscale) >= in_shift_final);
            s++, in_shift += 8;
        }
        if (out_shift != out_shift_initial)
            *d++ = out;
        for (w = dskip; w != 0; w--)
            *d++ = 0;
#undef out_shift_initial
#undef out_shift_update
    }
}
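
/*
 * Usage sketch (illustrative values only; no caller appears in this file):
 * flattening a 4x4-oversampled 64x64 bitmap into a 16x16 map of 2-bit
 * alpha values.
 *
 *      gs_log2_scale_point scale;
 *
 *      scale.x = 2;
 *      scale.y = 2;
 *      bits_compress_scaled(src, 0, 64, 64, src_raster,
 *                           dest, dest_raster, &scale, 1);
 *
 * Here width == height == 64 are the oversampled source dimensions, and
 * log2_out_bits == 1 requests 1 << 1 == 2 bits per output pixel.
 */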