File: stretch.cpp (stellarsolver 2.7-1)

/*  Stretch

    This application is free software; you can redistribute it and/or
    modify it under the terms of the GNU General Public
    License as published by the Free Software Foundation; either
    version 2 of the License, or (at your option) any later version.
*/

//Qt Includes
#include <QtConcurrent>

//System Includes
#include <math.h>

//CFITSIO Includes
#include <fitsio.h>

//SEP Includes
#include "sep/sep.h"

//Project Includes
#include "stretch.h"

namespace {

// Returns the median value of the vector.
// The vector is modified in an undefined way.
template <typename T>
T median(std::vector<T>& values)
{
  const int middle = values.size() / 2;
  std::nth_element(values.begin(), values.begin() + middle, values.end());
  return values[middle];
}

// Returns the rough max of the buffer.
template <typename T>
T sampledMax(T *values, int size, int sampleBy)
{
    T maxVal = 0;
    for (int i = 0; i < size; i+= sampleBy)
        if (maxVal < values[i])
            maxVal = values[i];
    return  maxVal;
}

// Returns the median of the sample values.
// The values are not modified.
template <typename T>
T median(T *values, int size, int sampleBy)
{
  const int downsampled_size = size / sampleBy;
  std::vector<T> samples(downsampled_size);
  for (int index = 0, i = 0; i < downsampled_size; ++i, index += sampleBy)
    samples[i] = values[index];
  return median(samples);
}

// This stretches one channel given the input parameters.
// Based on the spec in section 8.5.6
// https://pixinsight.com/doc/docs/XISF-1.0-spec/XISF-1.0-spec.html
// Uses multiple threads, blocks until done.
// The extension parameters are not used.
// Sampling is applied to the output (that is, with sampling=2, we compute every other output
// sample both in width and height, so the output would have about 4X fewer pixels).
template <typename T>
void stretchOneChannel(T *input_buffer, QImage *output_image,
                       const StretchParams& stretch_params, 
                       int input_range, int image_height, int image_width, int sampling)
{
  QVector<QFuture<void>> futures;

  // We're outputting uint8, so the max output is 255.
  constexpr int maxOutput = 255;

  // Maximum possible input value (e.g. 1024*64 - 1 for a 16 bit unsigned int).
  const float maxInput = input_range > 1 ? input_range - 1 : input_range;
  
  const float midtones   = stretch_params.grey_red.midtones;
  const float highlights = stretch_params.grey_red.highlights;
  const float shadows    = stretch_params.grey_red.shadows;

  // Precomputed expressions moved out of the loop.
  // highlights - shadows, protecting against divide-by-zero, on a 0 -> 1.0 scale.
  const float hsRangeFactor = highlights == shadows ? 1.0f : 1.0f / (highlights - shadows);
  // Shadow and highlight values translated to the ADU scale.
  const T nativeShadows = shadows * maxInput;
  const T nativeHighlights = highlights * maxInput;
  // Constants based on above needed for the stretch calculations.
  const float k1 = (midtones - 1) * hsRangeFactor * maxOutput / maxInput;
  const float k2 = ((2 * midtones) - 1) * hsRangeFactor / maxInput;
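  // A sketch of where k1 and k2 come from, assuming the midtones transfer function
  // of the XISF spec, MTF(x; m) = ((m - 1) * x) / ((2 * m - 1) * x - m): for an
  // input between the clip points, the normalized argument is
  //   x = (input - nativeShadows) * hsRangeFactor / maxInput,
  // and the 8-bit output is maxOutput * MTF(x; midtones). Substituting x and
  // collecting the constant factors gives
  //   output = (inputFloored * k1) / (inputFloored * k2 - midtones)
  // with inputFloored = input - nativeShadows, which is the expression used in the
  // loop below.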
  
  // Increment the input index by the sampling, the output index increments by 1.
  for (int j = 0, jout = 0; j < image_height; j+=sampling, jout++)
  {
    futures.append(QtConcurrent::run([ = ]()
    {
        T * inputLine  = input_buffer + j * image_width;
        auto * scanLine = output_image->scanLine(jout);
        
        for (int i = 0, iout = 0; i < image_width; i+=sampling, iout++)
        {
          const T input = inputLine[i];
          if (input < nativeShadows) scanLine[iout] = 0;
          else if (input >= nativeHighlights) scanLine[iout] = maxOutput;
          else 
          {
            const T inputFloored = (input - nativeShadows);
            scanLine[iout] = (inputFloored * k1) / (inputFloored * k2 - midtones);
          }
        }
    }));
  }
  for(QFuture<void> future : futures)
    future.waitForFinished();
}

// This is like the above 1-channel stretch, but extended for 3 channels.
// This could have been more modular, but the three channels are combined
// into a single qRgb value at the end, so it seems the simplest thing is to
// replicate the code. It is assumed the colors are not interleaved: the red image
// is stored fully, then the green, then the blue.
// Sampling is applied to the output (that is, with sampling=2, we compute every other output
// sample both in width and height, so the output would have about 4X fewer pixels).
template <typename T>
void stretchThreeChannels(T *inputBuffer, QImage *outputImage,
                          const StretchParams& stretchParams, 
                          int inputRange, int imageHeight, int imageWidth, int sampling)
{
  QVector<QFuture<void>> futures;

  // We're outputting uint8, so the max output is 255.
  constexpr int maxOutput = 255;

  // Maximum possible input value (e.g. 1024*64 - 1 for a 16 bit unsigned int).
  const float maxInput = inputRange > 1 ? inputRange - 1 : inputRange;
  
  const float midtonesR   = stretchParams.grey_red.midtones;
  const float highlightsR = stretchParams.grey_red.highlights;
  const float shadowsR    = stretchParams.grey_red.shadows;
  const float midtonesG   = stretchParams.green.midtones;
  const float highlightsG = stretchParams.green.highlights;
  const float shadowsG    = stretchParams.green.shadows;
  const float midtonesB   = stretchParams.blue.midtones;
  const float highlightsB = stretchParams.blue.highlights;
  const float shadowsB    = stretchParams.blue.shadows;

  // Precomputed expressions moved out of the loop.
  // highlights - shadows, protecting against divide-by-zero, in a 0 -> 1.0 scale.
  const float hsRangeFactorR = highlightsR == shadowsR ? 1.0f : 1.0f / (highlightsR - shadowsR);
  const float hsRangeFactorG = highlightsG == shadowsG ? 1.0f : 1.0f / (highlightsG - shadowsG);
  const float hsRangeFactorB = highlightsB == shadowsB ? 1.0f : 1.0f / (highlightsB - shadowsB);
  // Shadow and highlight values translated to the ADU scale.
  const T nativeShadowsR = shadowsR * maxInput;
  const T nativeShadowsG = shadowsG * maxInput;
  const T nativeShadowsB = shadowsB * maxInput;
  const T nativeHighlightsR = highlightsR * maxInput;
  const T nativeHighlightsG = highlightsG * maxInput;
  const T nativeHighlightsB = highlightsB * maxInput;
  // Constants based on above needed for the stretch calculations.
  const float k1R = (midtonesR - 1) * hsRangeFactorR * maxOutput / maxInput;
  const float k1G = (midtonesG - 1) * hsRangeFactorG * maxOutput / maxInput;
  const float k1B = (midtonesB - 1) * hsRangeFactorB * maxOutput / maxInput;
  const float k2R = ((2 * midtonesR) - 1) * hsRangeFactorR / maxInput;
  const float k2G = ((2 * midtonesG) - 1) * hsRangeFactorG / maxInput;
  const float k2B = ((2 * midtonesB) - 1) * hsRangeFactorB / maxInput;
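  // The k1*/k2* constants follow the same MTF rearrangement sketched in
  // stretchOneChannel above, once per channel.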
  
  const int size = imageWidth * imageHeight;
  
  for (int j = 0, jout = 0; j < imageHeight; j+=sampling, jout++)
  {
    futures.append(QtConcurrent::run([ = ]()
    {
        // R, G, B input images are stored one after another.
        T * inputLineR  = inputBuffer + j * imageWidth;
        T * inputLineG  = inputLineR + size;
        T * inputLineB  = inputLineG + size;
        
        auto * scanLine = reinterpret_cast<QRgb*>(outputImage->scanLine(jout));
        
        for (int i = 0, iout = 0; i < imageWidth; i+=sampling, iout++)
        {
          const T inputR = inputLineR[i];
          const T inputG = inputLineG[i];
          const T inputB = inputLineB[i];

          uint8_t red, green, blue;
          
          if (inputR < nativeShadowsR) red = 0;
          else if (inputR >= nativeHighlightsR) red = maxOutput;
          else 
          {
            const T inputFloored = (inputR - nativeShadowsR);
            red = (inputFloored * k1R) / (inputFloored * k2R - midtonesR);
          }

          if (inputG < nativeShadowsG) green = 0;
          else if (inputG >= nativeHighlightsG) green = maxOutput;
          else 
          {
            const T inputFloored = (inputG - nativeShadowsG);
            green = (inputFloored * k1G) / (inputFloored * k2G - midtonesG);
          }

          if (inputB < nativeShadowsB) blue = 0;
          else if (inputB >= nativeHighlightsB) blue = maxOutput;
          else 
          {
            const T inputFloored = (inputB - nativeShadowsB);
            blue = (inputFloored * k1B) / (inputFloored * k2B - midtonesB);
          }
          scanLine[iout] = qRgb(red, green, blue);
        }
    }));
  }
  for(QFuture<void> future : futures)
    future.waitForFinished();
}

template <typename T>
void stretchChannels(T *input_buffer, QImage *output_image,
                       const StretchParams& stretch_params, 
                     int input_range, int image_height, int image_width, int num_channels, int sampling)
{
    if (num_channels == 1)
      stretchOneChannel(input_buffer, output_image, stretch_params, input_range,
                        image_height, image_width, sampling);
    else if (num_channels == 3)
      stretchThreeChannels(input_buffer, output_image, stretch_params, input_range,
                           image_height, image_width, sampling);
}
  
// See section 8.5.7 in the above link:  https://pixinsight.com/doc/docs/XISF-1.0-spec/XISF-1.0-spec.html
template <typename T>
void computeParamsOneChannel(T *buffer, StretchParams1Channel *params, 
                             int inputRange, int height, int width)
{
  // Find the median sample.
  constexpr int maxSamples = 500000;
  const int sampleBy = width * height < maxSamples ? 1 : width * height / maxSamples;

  T medianSample = median(buffer, width * height, sampleBy);
  // Find the median absolute deviation: the median of abs(sample[i] - median).
  // The 1.4826 scaling to MADN is applied below.
  const int numSamples = width * height / sampleBy;
  std::vector<T> deviations(numSamples);
  for (int index = 0, i = 0; i < numSamples; ++i, index += sampleBy)
  {
    if (medianSample > buffer[index])
      deviations[i] = medianSample - buffer[index];
    else
      deviations[i] = buffer[index] - medianSample;
  }
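
  // Note: the 1.4826 factor used below is the consistency constant that rescales
  // the median absolute deviation so that, for normally distributed data, MADN
  // approximates the standard deviation.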

  // Normalize everything to a 0 -> 1.0 scale.
  const float medDev = median(deviations);
  const float normalizedMedian = medianSample / static_cast<float>(inputRange);
  const float MADN = 1.4826 * medDev / static_cast<float>(inputRange);

  const bool upperHalf = normalizedMedian > 0.5;

  const float shadows = (upperHalf || MADN == 0) ? 0.0 :
    fmin(1.0, fmax(0.0, (normalizedMedian - 2.8 * MADN)));

  const float highlights = (!upperHalf || MADN == 0) ? 1.0 :
    fmin(1.0, fmax(0.0, (normalizedMedian + 2.8 * MADN)));

  float X, M;
  constexpr float B = 0.25;
  if (!upperHalf) {
    X = normalizedMedian - shadows;
    M = B;
  } else {
    X = B;
    M = highlights - normalizedMedian;
  }
  float midtones;
  if (X == 0) midtones = 0.0f;
  else if (X == M) midtones = 0.5f;
  else if (X == 1) midtones = 1.0f;
  else midtones = ((M - 1) * X) / ((2 * M - 1) * X - M);
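
  // The general case above is itself the midtones transfer function evaluated at X
  // with parameter M, i.e. midtones = MTF(X; M), which solves MTF(X; midtones) = M.
  // For a predominantly dark image (!upperHalf) this picks the midtones balance that
  // maps the shadow-clipped median to (approximately) the target background B = 0.25.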

  // Store the params.
  params->shadows = shadows;
  params->highlights = highlights;
  params->midtones = midtones;
  params->shadows_expansion = 0.0;
  params->highlights_expansion = 1.0;
}

// We need to know the possible range of input values.
// Here we guess based on the sample's data type.
// Perhaps we should examine the contents of the file instead
// (e.g. look at the maximum value and extrapolate from that).
int getRange(int data_type)
{
    switch (data_type)
    {
        case SEP_TBYTE:
            return 256;
        case TSHORT:
            return 64*1024;
        case TUSHORT:
            return 64*1024;
        case TLONG:
            return 64*1024;
        case TFLOAT:
            return 64*1024;
        case TLONGLONG:
            return 64*1024;
        case TDOUBLE:
            return 64*1024;
        default:
            return 64*1024;
    }
}
  
}  // namespace

Stretch::Stretch(int width, int height, int channels, int data_type)
{
  image_width = width;
  image_height = height;
  image_channels = channels;
  dataType = data_type;
  input_range = getRange(dataType);
}

void Stretch::run(uint8_t *input, QImage *outputImage, int sampling)
{
    Q_ASSERT(outputImage->width() == (image_width + sampling - 1) / sampling);
    Q_ASSERT(outputImage->height() == (image_height + sampling - 1) / sampling);
    recalculateInputRange(input);

    switch (dataType)
    {
        case SEP_TBYTE:
            stretchChannels(reinterpret_cast<uint8_t*>(input), outputImage, params,
                            input_range, image_height, image_width, image_channels, sampling);
            break;
        case TSHORT:
            stretchChannels(reinterpret_cast<short*>(input), outputImage, params,
                            input_range, image_height, image_width, image_channels, sampling);
            break;
        case TUSHORT:
            stretchChannels(reinterpret_cast<unsigned short*>(input), outputImage, params,
                            input_range, image_height, image_width, image_channels, sampling);
            break;
        case TLONG:
            stretchChannels(reinterpret_cast<long*>(input), outputImage, params,
                            input_range, image_height, image_width, image_channels, sampling);
            break;
        case TFLOAT:
            stretchChannels(reinterpret_cast<float*>(input), outputImage, params,
                            input_range, image_height, image_width, image_channels, sampling);
            break;
        case TLONGLONG:
            stretchChannels(reinterpret_cast<long long*>(input), outputImage, params,
                            input_range, image_height, image_width, image_channels, sampling);
            break;
        case TDOUBLE:
            stretchChannels(reinterpret_cast<double*>(input), outputImage, params,
                            input_range, image_height, image_width, image_channels, sampling);
            break;
        default:
        break;
    }
}

// The input range for float/double is ambiguous, and we can't tell without the buffer,
// so we set it to 64K and possibly reduce it when we see the data.
void Stretch::recalculateInputRange(uint8_t *input)
{
    if (input_range <= 1) return;
    if (dataType != TFLOAT && dataType != TDOUBLE) return;

    float mx = 0;
    if (dataType == TFLOAT)
        mx = sampledMax(reinterpret_cast<float*>(input), image_height * image_width, 1000);
    else if (dataType == TDOUBLE)
        mx = sampledMax(reinterpret_cast<double*>(input), image_height * image_width, 1000);
    if (mx <= 1.01f) input_range = 1;
}

StretchParams Stretch::computeParams(uint8_t *input)
{
  recalculateInputRange(input);
  StretchParams result;
  for (int channel = 0; channel < image_channels; ++channel)
  {
    int offset = channel * image_width * image_height;
    StretchParams1Channel *params = channel == 0 ? &result.grey_red :
      (channel == 1 ? &result.green : &result.blue);
    switch (dataType)
    {
        case SEP_TBYTE:
        {
            auto buffer = reinterpret_cast<uint8_t*>(input);
            computeParamsOneChannel(buffer + offset, params, input_range,
                                    image_height, image_width);
            break;
        }
        case TSHORT:
        {
            auto buffer = reinterpret_cast<short*>(input);
            computeParamsOneChannel(buffer + offset, params, input_range,
                                    image_height, image_width);
            break;
        }
        case TUSHORT:
        {
            auto buffer = reinterpret_cast<unsigned short*>(input);
            computeParamsOneChannel(buffer + offset, params, input_range,
                                    image_height, image_width);
            break;
        }
        case TLONG:
        {
            auto buffer = reinterpret_cast<long*>(input);
            computeParamsOneChannel(buffer + offset, params, input_range,
                                    image_height, image_width);
            break;
        }
        case TFLOAT:
        {
            auto buffer = reinterpret_cast<float*>(input);
            computeParamsOneChannel(buffer + offset, params, input_range,
                                    image_height, image_width);
            break;
        }
        case TLONGLONG:
        {
            auto buffer = reinterpret_cast<long long*>(input);
            computeParamsOneChannel(buffer + offset, params, input_range,
                                    image_height, image_width);
            break;
        }
        case TDOUBLE:
        {
            auto buffer = reinterpret_cast<double*>(input);
            computeParamsOneChannel(buffer + offset, params, input_range,
                                    image_height, image_width);
            break;
        }
        default:
        break;
    }
  }
  return result;
}
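
/* A minimal usage sketch (not part of this file). It assumes the declarations in
   stretch.h, including a StretchParams setter such as setParams(); that setter is
   an assumption here and may be named differently in the actual header.

     #include <QImage>
     #include <fitsio.h>
     #include "stretch.h"

     QImage autoStretch(uint8_t *pixels, int width, int height, int channels)
     {
         // 16-bit unsigned data in this example; pass the matching CFITSIO type code.
         Stretch stretcher(width, height, channels, TUSHORT);

         // Compute auto-stretch parameters from the data, then apply them.
         StretchParams params = stretcher.computeParams(pixels);
         stretcher.setParams(params);  // assumed setter from stretch.h

         // With sampling == 1 the output must match the input dimensions (see the
         // Q_ASSERTs in Stretch::run). One byte per pixel for mono, QRgb for color.
         QImage output(width, height,
                       channels == 1 ? QImage::Format_Grayscale8 : QImage::Format_RGB32);
         stretcher.run(pixels, &output, 1);
         return output;
     }
*/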