Coverage Report

Created: 2025-06-15 00:57

/src/cmp_tool/lib/decompress/decmp.c
Line | Count | Source
1
/**
2
 * @file   decmp.c
3
 * @author Dominik Loidolt (dominik.loidolt@univie.ac.at)
4
 * @date   2020
5
 *
6
 * @copyright GPLv2
7
 * This program is free software; you can redistribute it and/or modify it
8
 * under the terms and conditions of the GNU General Public License,
9
 * version 2, as published by the Free Software Foundation.
10
 *
11
 * This program is distributed in the hope it will be useful, but WITHOUT
12
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14
 * more details.
15
 *
16
 * @brief software decompression library
17
 * @see Data Compression User Manual PLATO-UVIE-PL-UM-0001
18
 *
19
 * To decompress a compression entity (consisting of a compression entity header
20
 * and the compressed data) use the decompress_cmp_entity() function.
21
 *
22
 * @warning not intended for use with the flight software
23
 */
24
25
26
#include <stdint.h>
27
#include <limits.h>
28
#include <string.h>
29
#include <assert.h>
30
31
#include "../common/byteorder.h"
32
#include "../common/compiler.h"
33
34
#include "read_bitstream.h"
35
#include "../common/cmp_data_types.h"
36
#include "../decmp.h"
37
#include "../common/cmp_debug.h"
38
#include "../common/cmp_support.h"
39
#include "../common/cmp_entity.h"
40
#include "../common/cmp_cal_up_model.h"
41
#include "../common/cmp_max_used_bits.h"
42
43
44
136
#define CORRUPTION_DETECTED (-1)
45
46
47
MAYBE_UNUSED static const char *please_check_str =
48
  "Please check that the compression parameters match those used to compress the data and that the compressed data are not corrupted.";
49
50
51
/**
52
 * @brief function pointer to a code word decoder function
53
 */
54
55
typedef uint32_t(*decoder_ptr)(struct bit_decoder *, uint32_t, uint32_t);
56
57
58
/**
59
 * @brief structure to hold all parameters to decode a value
60
 */
61
62
struct decoder_setup {
63
  int (*decode_method_f)(const struct decoder_setup *setup,
64
             uint32_t *decoded_value); /* pointer to the decoding function with escape mechanism */
65
  decoder_ptr decode_cw_f; /* pointer to the code word decoder function (Golomb/Rice/unary) */
66
  struct bit_decoder *dec; /* pointer to a bit_decoder context */
67
  uint32_t encoder_par1;   /* encoding parameter 1 */
68
  uint32_t encoder_par2;   /* encoding parameter 2 */
69
  uint32_t outlier_par;    /* outlier parameter */
70
  uint32_t lossy_par;      /* lossy compression parameter */
71
  uint32_t max_data_bits;  /* bit length of the decoded value */
72
};
73
74
75
enum decmp_type {ICU_DECOMRESSION, RDCU_DECOMPRESSION};
76
77
78
/**
79
 * @brief decode the next unary code word in the bitstream
80
 *
81
 * @param dec   a pointer to a bit_decoder context
82
 * @param m   this parameter is not used
83
 * @param log2_m  this parameter is not used
84
 * @note: Can be used to decode a code word with compression parameter m = 1 (log2_m = 0)
85
 *
86
 * @returns the decoded value
87
 */
88
89
static __inline uint32_t unary_decoder(struct bit_decoder *dec, uint32_t m UNUSED,
90
               uint32_t log2_m UNUSED)
91
6.14k
{
92
6.14k
  uint32_t const decoded_cw = bit_peek_leading_ones(dec); /* decode unary coding */
93
6.14k
  uint32_t const cw_len = decoded_cw + 1; /* Number of 1's + following 0 */
94
95
6.14k
  bit_consume_bits(dec, cw_len);
96
97
6.14k
  return decoded_cw;
98
6.14k
}
99
100
101
/**
102
 * @brief decode the next Rice code word in the bitstream
103
 *
104
 * @param dec   a pointer to a bit_decoder context
105
 * @param m   Golomb parameter, must be the same used for encoding
106
 * @param log2_m  Rice parameter, is ilog_2(m), must be larger than 0
107
 * @note the Golomb parameter (m) must be a power of 2
108
 * @warning the Rice parameter (log2_m) must be greater than 0! If you want to
109
 *  use a Rice parameter equal to 0, use the unary_decoder instead.
110
 *
111
 * @returns the decoded value
112
 */
113
114
static uint32_t rice_decoder(struct bit_decoder *dec, uint32_t m, uint32_t log2_m)
115
666
{
116
666
  uint32_t q;  /* quotient */
117
666
  uint32_t r;  /* remainder */
118
119
666
  assert(log2_m > 0 && log2_m < 32);
120
121
666
  q = unary_decoder(dec, m, log2_m); /* decode quotient unary code part */
122
666
  r = bit_read_bits32(dec, log2_m); /* get remainder */
123
124
666
  return (q << log2_m) + r; /* calculate decoded value (q*m+r) */
125
666
}
126
127
128
/**
129
 * @brief decode the next Golomb code word in the bitstream
130
 *
131
 * @param dec   a pointer to a bit_decoder context
132
 * @param m   Golomb parameter (has to be bigger than 0)
133
 * @param log2_m  is ilog_2(m), calculated outside the function for better
134
 *      performance
135
 *
136
 * @returns the decoded value
137
 */
138
139
static uint32_t golomb_decoder(struct bit_decoder *dec, uint32_t m, uint32_t log2_m)
140
3.97k
{
141
3.97k
  uint32_t q;  /* quotient */
142
3.97k
  uint32_t r1; /* remainder case 1 */
143
3.97k
  uint32_t r2; /* remainder case 2 */
144
3.97k
  uint32_t r;  /* remainder */
145
3.97k
  uint32_t cutoff; /* cutoff between group 1 and 2 */
146
147
3.97k
  assert(m > 0);
148
3.97k
  assert(log2_m == ilog_2(m));
149
150
  /* decode quotient unary code part */
151
3.97k
  q = unary_decoder(dec, m, log2_m);
152
153
  /* get the remainder code for both cases */
154
3.97k
  r2 = (uint32_t)bit_peek_bits(dec, log2_m+1);
155
3.97k
  r1 = r2 >> 1;
156
157
  /* calculate cutoff between case 1 and 2 */
158
3.97k
  cutoff = (0x2U << log2_m) - m; /* = 2^(log2_m+1)-m */
159
160
3.97k
  if (r1 < cutoff) { /* remainder case 1: remainder length=log2_m */
161
2.81k
    bit_consume_bits(dec, log2_m);
162
2.81k
    r = r1;
163
2.81k
  } else { /* remainder case 2: remainder length = log2_m+1 */
164
1.16k
    bit_consume_bits(dec, log2_m+1);
165
1.16k
    r = r2 - cutoff;
166
1.16k
  }
167
168
3.97k
  return q*m + r;
169
3.97k
}
170
171
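
Aside (illustrative, not part of decmp.c): the cutoff rule above can be checked by hand. For m = 5 (log2_m = 2) the cutoff is 2^3 - 5 = 3, so remainders 0..2 are stored in log2_m = 2 bits, while remainders 3 and 4 are stored in 3 bits as 6 and 7. The standalone sketch below mirrors only the remainder arithmetic of golomb_decoder(); it does not use the bit_decoder API, and golomb_value_from_parts() is a hypothetical helper introduced here for illustration.

#include <assert.h>
#include <stdint.h>

/* illustrative sketch: rebuild a Golomb-decoded value from an already parsed
 * quotient q and the peeked log2_m+1 remainder bits r2, using the same
 * two-case cutoff rule as golomb_decoder() */
static uint32_t golomb_value_from_parts(uint32_t q, uint32_t r2, uint32_t m, uint32_t log2_m)
{
	uint32_t const cutoff = (0x2U << log2_m) - m; /* = 2^(log2_m+1) - m */
	uint32_t const r1 = r2 >> 1;                  /* short (log2_m bit) remainder */

	if (r1 < cutoff)
		return q*m + r1;          /* case 1: only log2_m remainder bits belong to this code word */
	return q*m + (r2 - cutoff);       /* case 2: all log2_m+1 bits belong to this code word */
}

int main(void)
{
	/* m = 5: value 13 = 2*5 + 3; remainder 3 >= cutoff, stored as 3 + cutoff = 6 in 3 bits */
	assert(golomb_value_from_parts(2, 6, 5, 2) == 13);
	/* m = 5: value 11 = 2*5 + 1; remainder 1 < cutoff, stored as 1 in 2 bits */
	assert(golomb_value_from_parts(2, 2, 5, 2) == 11);
	return 0;
}
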
172
/**
173
 * @brief select the decoder based on the used Golomb parameter
174
 *
175
 * @param golomb_par  Golomb parameter, has to be bigger than 0
176
 *
177
 * @note if the Golomb parameter is a power of 2 we can use the faster Rice decoder
178
 * @note if the Golomb parameter is 1 we can use the even faster unary decoder
179
 *
180
 * @returns function pointer to the selected code word decoder function
181
 */
182
183
static decoder_ptr select_decoder(uint32_t golomb_par)
184
1.46k
{
185
1.46k
  assert(golomb_par > 0);
186
187
1.46k
  if (golomb_par == 1)
188
89
    return &unary_decoder;
189
190
1.37k
  if (is_a_pow_of_2(golomb_par))
191
155
    return &rice_decoder;
192
1.21k
  else
193
1.21k
    return &golomb_decoder;
194
1.37k
}
195
196
197
/**
198
 * @brief decode the next code word with the zero escape mechanism from the bitstream
199
 *
200
 * @param setup   pointer to the decoder setup
201
 * @param decoded_value points to the location where the decoded value is stored
202
 *
203
 * @returns 0 on success; otherwise error
204
 */
205
206
static int decode_zero(const struct decoder_setup *setup, uint32_t *decoded_value)
207
2.20k
{
208
  /* Decode the next value in the bitstream with the Golomb/Rice/unary decoder */
209
2.20k
  *decoded_value = setup->decode_cw_f(setup->dec, setup->encoder_par1, setup->encoder_par2);
210
211
2.20k
  if (*decoded_value != 0) { /* no escape symbol detected */
212
1.43k
    if (*decoded_value >= setup->outlier_par) {
213
32
      debug_print("Error: Data consistency check failed. Non-outlier decoded value greater than or equal to the outlier parameter. %s", please_check_str);
214
32
      return CORRUPTION_DETECTED;
215
32
    }
216
1.40k
    *decoded_value -= 1;
217
1.40k
  } else {
218
    /* the zero escape symbol mechanism was used; read unencoded value */
219
769
    bit_refill(setup->dec);
220
769
    *decoded_value = bit_read_bits32_sub_1(setup->dec, setup->max_data_bits);
221
222
769
    if (*decoded_value < setup->outlier_par - 1) { /* -1 because 1 has already been subtracted from *decoded_value */
223
35
      if (bit_refill(setup->dec) != BIT_OVERFLOW)
224
24
        debug_print("Error: Data consistency check failed. Outlier smaller than the outlier parameter. %s", please_check_str);
225
35
      return CORRUPTION_DETECTED;
226
35
    }
227
769
  }
228
2.13k
  return bit_refill(setup->dec) == BIT_OVERFLOW;
229
2.20k
}
230
231
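
Aside (illustrative, not part of decmp.c): with the zero escape mechanism the encoder shifts every regular value up by one so that the code word 0 remains free as the escape symbol; decode_zero() therefore subtracts 1 from every non-zero code word. A minimal sketch of that mapping (zero_escape_value() is a hypothetical helper introduced here for illustration only):

#include <assert.h>
#include <stdint.h>

/* illustrative sketch: map a non-zero code word back to its data value under
 * the zero escape mechanism (code word 0 signals that an unencoded value follows) */
static uint32_t zero_escape_value(uint32_t code_word)
{
	assert(code_word != 0); /* 0 is the escape symbol, not a data value */
	return code_word - 1;
}

int main(void)
{
	assert(zero_escape_value(1) == 0);
	assert(zero_escape_value(5) == 4);
	return 0;
}
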
232
/**
233
 * @brief decode the next code word with the multi escape mechanism from the bitstream
234
 *
235
 * @param setup   pointer to the decoder setup
236
 * @param decoded_value points to the location where the decoded value is stored
237
 *
238
 * @returns 0 on success; otherwise error
239
 */
240
241
static int decode_multi(const struct decoder_setup *setup, uint32_t *decoded_value)
242
3.94k
{
243
  /* Decode the next value in the bitstream with the Golomb/Rice/unary decoder */
244
3.94k
  *decoded_value = setup->decode_cw_f(setup->dec, setup->encoder_par1, setup->encoder_par2);
245
246
3.94k
  if (*decoded_value >= setup->outlier_par) { /* escape symbol mechanism detected */
247
310
    uint32_t const unencoded_len = (*decoded_value - setup->outlier_par + 1) << 1;
248
249
310
    if (unencoded_len > ((setup->max_data_bits+1) & -2U)) { /* round up max_data_bits to the nearest multiple of 2 */
250
55
      debug_print("Error: Data consistency check failed. Multi escape symbol higher than expected. %s", please_check_str);
251
55
      return CORRUPTION_DETECTED;
252
55
    }
253
254
    /* read unencoded value */
255
255
    bit_refill(setup->dec);
256
255
    *decoded_value = bit_read_bits32(setup->dec, unencoded_len);
257
258
255
    if (*decoded_value >> (unencoded_len-2) == 0) { /* check whether at least one of the two highest bits is set */
259
84
      if (unencoded_len > 2) { /* Exception: if we code outlier_par as outlier, no set bit is expected */
260
8
        if (bit_refill(setup->dec) != BIT_OVERFLOW)
261
7
          debug_print("Error: Data consistency check failed. Unencoded data after multi escape symbol too small. %s", please_check_str);
262
8
        return CORRUPTION_DETECTED;
263
8
      }
264
84
    }
265
266
247
    *decoded_value += setup->outlier_par;
267
268
247
    if ((*decoded_value & BIT_MASK[setup->max_data_bits]) < setup->outlier_par) { /* check for overflow in addition */
269
6
      if (bit_refill(setup->dec) != BIT_OVERFLOW)
270
5
        debug_print("Error: Data consistency check failed. Outlier smaller than the outlier parameter. %s", please_check_str);
271
6
      return CORRUPTION_DETECTED;
272
6
    }
273
247
  }
274
3.87k
  return bit_refill(setup->dec) == BIT_OVERFLOW;
275
3.94k
}
276
277
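
Aside (illustrative, not part of decmp.c): with the multi escape mechanism a code word greater than or equal to the outlier (spillover) parameter is not a data value but an escape symbol whose offset from the outlier parameter encodes the bit length of the unencoded value that follows. A standalone sketch of that length rule, assuming an example outlier parameter of 16 (multi_escape_unencoded_len() is a hypothetical helper introduced here for illustration only):

#include <assert.h>
#include <stdint.h>

/* illustrative sketch: bit length of the unencoded value that follows a multi
 * escape symbol, mirroring the rule in decode_multi():
 * unencoded_len = (code_word - outlier_par + 1) * 2 */
static uint32_t multi_escape_unencoded_len(uint32_t code_word, uint32_t outlier_par)
{
	return (code_word - outlier_par + 1) << 1;
}

int main(void)
{
	uint32_t const outlier_par = 16; /* example spillover threshold */

	assert(multi_escape_unencoded_len(16, outlier_par) == 2);  /* escape 16 -> 2 bit value */
	assert(multi_escape_unencoded_len(17, outlier_par) == 4);  /* escape 17 -> 4 bit value */
	assert(multi_escape_unencoded_len(23, outlier_par) == 16); /* escape 23 -> 16 bit value */
	return 0;
}

The decoded outlier is then the unencoded value plus the outlier parameter, which is why decode_multi() additionally checks that at least one of the two highest bits of the unencoded value is set (except for the shortest escape length): otherwise the value would have fit into a shorter escape length.
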
278
/**
279
 * @brief remap an unsigned value back to a signed value
280
 * @note this is the reverse function of map_to_pos()
281
 *
282
 * @param value_to_unmap  unsigned value to remap
283
 *
284
 * @returns the signed remapped value
285
 */
286
287
static __inline uint32_t re_map_to_pos(uint32_t value_to_unmap)
288
6.14k
{
289
6.14k
  if (value_to_unmap & 0x1) { /* if odd */
290
    /* uint64_t to prevent overflow if value_to_unmap == 0xFFFFFFFF */
291
2.68k
    uint64_t const tmp64 = value_to_unmap;
292
293
2.68k
    return (uint32_t)(-((tmp64 + 1) / 2));
294
3.46k
  } else {
295
3.46k
    return value_to_unmap / 2;
296
3.46k
  }
297
6.14k
}
298
299
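
Aside (illustrative, not part of decmp.c): re_map_to_pos() undoes the sign mapping applied before encoding, under which non-negative values become even codes and negative values become odd codes, so the code sequence 0, 1, 2, 3, 4, ... maps back to 0, -1, 1, -2, 2, ... A standalone check of that behaviour (re_map_to_pos_sketch() simply repeats the arithmetic above):

#include <assert.h>
#include <stdint.h>

/* illustrative sketch: same arithmetic as re_map_to_pos() above */
static uint32_t re_map_to_pos_sketch(uint32_t value_to_unmap)
{
	if (value_to_unmap & 0x1) { /* odd code -> negative value */
		uint64_t const tmp64 = value_to_unmap; /* avoid overflow for 0xFFFFFFFF */

		return (uint32_t)(-((tmp64 + 1) / 2));
	}
	return value_to_unmap / 2; /* even code -> non-negative value */
}

int main(void)
{
	assert(re_map_to_pos_sketch(0) == 0);
	assert(re_map_to_pos_sketch(1) == (uint32_t)-1);
	assert(re_map_to_pos_sketch(2) == 1);
	assert(re_map_to_pos_sketch(3) == (uint32_t)-2);
	assert(re_map_to_pos_sketch(0xFFFFFFFFU) == 0x80000000U); /* -2^31 */
	return 0;
}
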
300
/**
301
 * @brief decompress the next code word in the bitstream and decorrelate it with
302
 *  the model
303
 *
304
 * @param setup   pointer to the decoder setup
305
 * @param decoded_value points to the location where the decoded value is stored
306
 * @param model   model of the decoded_value (0 if not used)
307
 *
308
 * @returns 0 on success; otherwise error
309
 */
310
311
static int decode_value(const struct decoder_setup *setup, uint32_t *decoded_value,
312
      uint32_t model)
313
6.14k
{
314
  /* decode the next value from the bitstream */
315
6.14k
  int const err = setup->decode_method_f(setup, decoded_value);
316
317
  /* map the unsigned decoded value back to a signed value */
318
6.14k
  *decoded_value = re_map_to_pos(*decoded_value);
319
320
  /* decorrelate the data with the model */
321
6.14k
  *decoded_value += round_fwd(model, setup->lossy_par);
322
323
  /* we mask only the used bits in case there is an overflow when adding the model */
324
6.14k
  *decoded_value &= BIT_MASK[setup->max_data_bits];
325
326
  /* inverse step of the lossy compression */
327
6.14k
  *decoded_value = round_inv(*decoded_value, setup->lossy_par);
328
329
6.14k
  return err;
330
6.14k
}
331
332
333
/**
334
 * @brief configure a decoder setup structure for decoding a value
335
 *
336
 * @param setup   pointer to the decoder setup
337
 * @param dec   pointer to a bit_decoder context
338
 * @param cmp_mode  compression mode
339
 * @param cmp_par compression parameter
340
 * @param spillover spillover (outlier) parameter
341
 * @param lossy_par lossy compression parameter
342
 * @param max_data_bits how many bits are needed to represent the highest possible value
343
 */
344
345
static void configure_decoder_setup(struct decoder_setup *setup, struct bit_decoder *dec,
346
            enum cmp_mode cmp_mode, uint32_t cmp_par,
347
            uint32_t spillover, uint32_t lossy_par,
348
            uint32_t max_data_bits)
349
1.46k
{
350
1.46k
  assert(setup != NULL);
351
1.46k
  assert(dec != NULL);
352
1.46k
  assert(cmp_par != 0);
353
1.46k
  assert(max_data_bits > 0 && max_data_bits <= 32);
354
355
1.46k
  if (multi_escape_mech_is_used(cmp_mode))
356
928
    setup->decode_method_f = &decode_multi;
357
532
  else if (zero_escape_mech_is_used(cmp_mode))
358
532
    setup->decode_method_f = &decode_zero;
359
0
  else {
360
0
    debug_print("Error: Compression mode not supported.");
361
0
    assert(0);
362
0
  }
363
1.46k
  setup->decode_cw_f = select_decoder(cmp_par);
364
1.46k
  setup->dec = dec;
365
1.46k
  setup->encoder_par1 = cmp_par; /* encoding parameter 1 */
366
1.46k
  setup->encoder_par2 = ilog_2(cmp_par); /* encoding parameter 2 */
367
1.46k
  setup->outlier_par = spillover; /* outlier parameter */
368
1.46k
  setup->lossy_par = lossy_par; /* lossy compression parameter */
369
1.46k
  setup->max_data_bits = max_data_bits; /* how many bits are needed to represent the highest possible value */
370
1.46k
}
371
372
373
/**
374
 * @brief return a pointer to the data of a collection
375
 *
376
 * @param col pointer to a collection header (can be NULL)
377
 *
378
 * @returns pointer to the collection data; NULL if col is NULL
379
 */
380
381
static void *get_collection_data(void *col)
382
677
{
383
677
  if (col)
384
498
    col = (uint8_t *)col + COLLECTION_HDR_SIZE;
385
677
  return col;
386
677
}
387
388
389
/**
390
 * @brief return a pointer to the data of a collection
391
 *
392
 * @param col pointer to a collection header (can be NULL)
393
 *
394
 * @returns pointer to the collection data; NULL if col is NULL
395
 */
396
397
static const void *get_collection_data_const(const void *col)
398
393
{
399
393
  if (col)
400
286
    col = (const uint8_t *)col + COLLECTION_HDR_SIZE;
401
393
  return col;
402
393
}
403
404
405
/**
406
 * @brief decompress imagette data
407
 *
408
 * @param cfg pointer to the compression configuration structure
409
 * @param dec a pointer to a bit_decoder context
410
 *
411
 * @returns 0 on success; otherwise error
412
 */
413
414
static int decompress_imagette(const struct cmp_cfg *cfg, struct bit_decoder *dec, enum decmp_type decmp_type)
415
117
{
416
117
  size_t i;
417
117
  int err;
418
117
  uint32_t decoded_value;
419
117
  uint32_t max_data_bits;
420
117
  struct decoder_setup setup;
421
117
  uint16_t *data_buf;
422
117
  const uint16_t *model_buf;
423
117
  uint16_t *up_model_buf;
424
117
  const uint16_t *next_model_p;
425
117
  uint16_t model;
426
427
117
  switch (decmp_type) {
428
112
  case RDCU_DECOMPRESSION: /* RDCU compresses the header like data */
429
112
    data_buf = cfg->dst;
430
112
    model_buf = cfg->model_buf;
431
112
    up_model_buf = cfg->updated_model_buf;
432
112
    break;
433
5
  case ICU_DECOMRESSION:
434
5
    data_buf = get_collection_data(cfg->dst);
435
5
    model_buf = get_collection_data_const(cfg->model_buf);
436
5
    up_model_buf = get_collection_data(cfg->updated_model_buf);
437
5
    break;
438
117
  }
439
440
117
  if (model_mode_is_used(cfg->cmp_mode)) {
441
63
    model =  get_unaligned(&model_buf[0]);
442
63
    next_model_p = &model_buf[1];
443
63
  } else {
444
54
    up_model_buf = NULL;
445
54
    memset(&model, 0, sizeof(model));
446
54
    next_model_p = data_buf;
447
54
  }
448
449
117
  switch (cfg->data_type) {
450
42
  case DATA_TYPE_IMAGETTE:
451
48
  case DATA_TYPE_IMAGETTE_ADAPTIVE:
452
48
    max_data_bits = MAX_USED_BITS.nc_imagette;
453
48
    break;
454
7
  case DATA_TYPE_SAT_IMAGETTE:
455
23
  case DATA_TYPE_SAT_IMAGETTE_ADAPTIVE:
456
23
    max_data_bits = MAX_USED_BITS.saturated_imagette;
457
23
    break;
458
0
  default:
459
45
  case DATA_TYPE_F_CAM_IMAGETTE:
460
46
  case DATA_TYPE_F_CAM_IMAGETTE_ADAPTIVE:
461
46
    max_data_bits = MAX_USED_BITS.fc_imagette;
462
46
    break;
463
117
  }
464
465
117
  configure_decoder_setup(&setup, dec, cfg->cmp_mode, cfg->cmp_par_imagette,
466
117
        cfg->spill_imagette, cfg->round, max_data_bits);
467
468
2.36k
  for (i = 0; ; i++) {
469
2.36k
    err = decode_value(&setup, &decoded_value, model);
470
2.36k
    if (err)
471
63
      break;
472
473
2.29k
    put_unaligned((uint16_t)decoded_value, &data_buf[i]);
474
475
2.29k
    if (up_model_buf) {
476
612
      uint16_t up_model = cmp_up_model((uint16_t)decoded_value, model, cfg->model_value,
477
612
                   setup.lossy_par);
478
612
      put_unaligned(up_model, &up_model_buf[i]);
479
612
    }
480
481
2.29k
    if (i >= cfg->samples-1)
482
54
      break;
483
484
2.24k
    model = get_unaligned(&next_model_p[i]);
485
2.24k
  }
486
117
  return err;
487
117
}
488
489
490
/**
491
 * @brief decompress short normal light flux (S_FX) data
492
 *
493
 * @param cfg pointer to the compression configuration structure
494
 * @param dec a pointer to a bit_decoder context
495
 *
496
 * @returns 0 on success; otherwise error
497
 */
498
499
static int decompress_s_fx(const struct cmp_cfg *cfg, struct bit_decoder *dec)
500
49
{
501
49
  size_t i;
502
49
  int err;
503
49
  uint32_t decoded_value;
504
49
  struct decoder_setup setup_exp_flags, setup_fx;
505
49
  struct s_fx *data_buf = get_collection_data(cfg->dst);
506
49
  const struct s_fx *model_buf = get_collection_data_const(cfg->model_buf);
507
49
  struct s_fx *up_model_buf;
508
49
  const struct s_fx *next_model_p;
509
49
  struct s_fx model;
510
511
49
  if (model_mode_is_used(cfg->cmp_mode)) {
512
36
    model = model_buf[0];
513
36
    next_model_p = &model_buf[1];
514
36
    up_model_buf = get_collection_data(cfg->updated_model_buf);
515
36
  } else {
516
13
    memset(&model, 0, sizeof(model));
517
13
    next_model_p = data_buf;
518
13
    up_model_buf = NULL;
519
13
  }
520
521
49
  configure_decoder_setup(&setup_exp_flags, dec, cfg->cmp_mode, cfg->cmp_par_exp_flags,
522
49
        cfg->spill_exp_flags, cfg->round, MAX_USED_BITS.s_exp_flags);
523
49
  configure_decoder_setup(&setup_fx, dec, cfg->cmp_mode, cfg->cmp_par_fx,
524
49
        cfg->spill_fx, cfg->round, MAX_USED_BITS.s_fx);
525
526
208
  for (i = 0; ; i++) {
527
208
    err = decode_value(&setup_exp_flags, &decoded_value, model.exp_flags);
528
208
    if (err)
529
11
      break;
530
197
    data_buf[i].exp_flags = (__typeof__(data_buf[i].exp_flags))decoded_value;
531
532
197
    err = decode_value(&setup_fx, &decoded_value, model.fx);
533
197
    if (err)
534
6
      break;
535
191
    data_buf[i].fx = decoded_value;
536
537
191
    if (up_model_buf) {
538
72
      up_model_buf[i].exp_flags = cmp_up_model(data_buf[i].exp_flags, model.exp_flags,
539
72
                 cfg->model_value, setup_exp_flags.lossy_par);
540
72
      up_model_buf[i].fx = cmp_up_model(data_buf[i].fx, model.fx,
541
72
                cfg->model_value, setup_fx.lossy_par);
542
72
    }
543
544
191
    if (i >= cfg->samples-1)
545
32
      break;
546
547
159
    model = next_model_p[i];
548
159
  }
549
49
  return err;
550
49
}
551
552
553
/**
554
 * @brief decompress S_FX_EFX data
555
 *
556
 * @param cfg pointer to the compression configuration structure
557
 * @param dec a pointer to a bit_decoder context
558
 *
559
 * @returns 0 on success; otherwise error
560
 */
561
562
static int decompress_s_fx_efx(const struct cmp_cfg *cfg, struct bit_decoder *dec)
563
49
{
564
49
  size_t i;
565
49
  int err;
566
49
  uint32_t decoded_value;
567
49
  struct decoder_setup setup_exp_flags, setup_fx, setup_efx;
568
49
  struct s_fx_efx *data_buf = get_collection_data(cfg->dst);
569
49
  const struct s_fx_efx *model_buf = get_collection_data_const(cfg->model_buf);
570
49
  struct s_fx_efx *up_model_buf;
571
49
  const struct s_fx_efx *next_model_p;
572
49
  struct s_fx_efx model;
573
574
49
  if (model_mode_is_used(cfg->cmp_mode)) {
575
30
    up_model_buf = get_collection_data(cfg->updated_model_buf);
576
30
    model = model_buf[0];
577
30
    next_model_p = &model_buf[1];
578
30
  } else {
579
19
    up_model_buf = NULL;
580
19
    memset(&model, 0, sizeof(model));
581
19
    next_model_p = data_buf;
582
19
  }
583
584
49
  configure_decoder_setup(&setup_exp_flags, dec, cfg->cmp_mode, cfg->cmp_par_exp_flags,
585
49
        cfg->spill_exp_flags, cfg->round, MAX_USED_BITS.s_exp_flags);
586
49
  configure_decoder_setup(&setup_fx, dec, cfg->cmp_mode, cfg->cmp_par_fx,
587
49
        cfg->spill_fx, cfg->round, MAX_USED_BITS.s_fx);
588
49
  configure_decoder_setup(&setup_efx, dec, cfg->cmp_mode, cfg->cmp_par_efx,
589
49
        cfg->spill_efx, cfg->round, MAX_USED_BITS.s_efx);
590
591
118
  for (i = 0; ; i++) {
592
118
    err = decode_value(&setup_exp_flags, &decoded_value, model.exp_flags);
593
118
    if (err)
594
6
      break;
595
112
    data_buf[i].exp_flags = (__typeof__(data_buf[i].exp_flags)) decoded_value;
596
597
112
    err = decode_value(&setup_fx, &decoded_value, model.fx);
598
112
    if (err)
599
5
      break;
600
107
    data_buf[i].fx = decoded_value;
601
602
107
    err = decode_value(&setup_efx, &decoded_value, model.efx);
603
107
    if (err)
604
4
      break;
605
103
    data_buf[i].efx = decoded_value;
606
607
103
    if (up_model_buf) {
608
36
      up_model_buf[i].exp_flags = cmp_up_model(data_buf[i].exp_flags, model.exp_flags,
609
36
                 cfg->model_value, setup_exp_flags.lossy_par);
610
36
      up_model_buf[i].fx = cmp_up_model(data_buf[i].fx, model.fx,
611
36
                cfg->model_value, setup_fx.lossy_par);
612
36
      up_model_buf[i].efx = cmp_up_model(data_buf[i].efx, model.efx,
613
36
                 cfg->model_value, setup_efx.lossy_par);
614
36
    }
615
616
103
    if (i >= cfg->samples-1)
617
34
      break;
618
619
69
    model = next_model_p[i];
620
69
  }
621
49
  return err;
622
49
}
623
624
625
/**
626
 * @brief decompress short S_FX_NCOB data
627
 *
628
 * @param cfg pointer to the compression configuration structure
629
 * @param dec a pointer to a bit_decoder context
630
 *
631
 * @returns 0 on success; otherwise error
632
 */
633
634
static int decompress_s_fx_ncob(const struct cmp_cfg *cfg, struct bit_decoder *dec)
635
22
{
636
22
  size_t i;
637
22
  int err;
638
22
  uint32_t decoded_value;
639
22
  struct decoder_setup setup_exp_flags, setup_fx, setup_ncob;
640
22
  struct s_fx_ncob *data_buf = get_collection_data(cfg->dst);
641
22
  const struct s_fx_ncob *model_buf = get_collection_data_const(cfg->model_buf);
642
22
  struct s_fx_ncob *up_model_buf;
643
22
  const struct s_fx_ncob *next_model_p;
644
22
  struct s_fx_ncob model;
645
646
22
  if (model_mode_is_used(cfg->cmp_mode)) {
647
21
    up_model_buf = get_collection_data(cfg->updated_model_buf);
648
21
    model = model_buf[0];
649
21
    next_model_p = &model_buf[1];
650
21
  } else {
651
1
    up_model_buf = NULL;
652
1
    memset(&model, 0, sizeof(model));
653
1
    next_model_p = data_buf;
654
1
  }
655
656
22
  configure_decoder_setup(&setup_exp_flags, dec, cfg->cmp_mode, cfg->cmp_par_exp_flags,
657
22
        cfg->spill_exp_flags, cfg->round, MAX_USED_BITS.s_exp_flags);
658
22
  configure_decoder_setup(&setup_fx, dec, cfg->cmp_mode, cfg->cmp_par_fx,
659
22
        cfg->spill_fx, cfg->round, MAX_USED_BITS.s_fx);
660
22
  configure_decoder_setup(&setup_ncob, dec, cfg->cmp_mode, cfg->cmp_par_ncob,
661
22
        cfg->spill_ncob, cfg->round, MAX_USED_BITS.s_ncob);
662
663
109
  for (i = 0; ; i++) {
664
109
    err = decode_value(&setup_exp_flags, &decoded_value, model.exp_flags);
665
109
    if (err)
666
10
      break;
667
99
    data_buf[i].exp_flags = (__typeof__(data_buf[i].exp_flags)) decoded_value;
668
669
99
    err = decode_value(&setup_fx, &decoded_value, model.fx);
670
99
    if (err)
671
4
      break;
672
95
    data_buf[i].fx = decoded_value;
673
674
95
    err = decode_value(&setup_ncob, &decoded_value, model.ncob_x);
675
95
    if (err)
676
3
      break;
677
92
    data_buf[i].ncob_x = decoded_value;
678
679
92
    err = decode_value(&setup_ncob, &decoded_value, model.ncob_y);
680
92
    if (err)
681
1
      break;
682
91
    data_buf[i].ncob_y = decoded_value;
683
684
91
    if (up_model_buf) {
685
20
      up_model_buf[i].exp_flags = cmp_up_model(data_buf[i].exp_flags, model.exp_flags,
686
20
                 cfg->model_value, setup_exp_flags.lossy_par);
687
20
      up_model_buf[i].fx = cmp_up_model(data_buf[i].fx, model.fx,
688
20
                cfg->model_value, setup_fx.lossy_par);
689
20
      up_model_buf[i].ncob_x = cmp_up_model(data_buf[i].ncob_x, model.ncob_x,
690
20
                    cfg->model_value, setup_ncob.lossy_par);
691
20
      up_model_buf[i].ncob_y = cmp_up_model(data_buf[i].ncob_y, model.ncob_y,
692
20
                    cfg->model_value, setup_ncob.lossy_par);
693
20
    }
694
695
91
    if (i >= cfg->samples-1)
696
4
      break;
697
698
87
    model = next_model_p[i];
699
87
  }
700
22
  return err;
701
22
}
702
703
704
/**
705
 * @brief decompress short S_FX_EFX_NCOB_ECOB data
706
 *
707
 * @param cfg pointer to the compression configuration structure
708
 * @param dec a pointer to a bit_decoder context
709
 *
710
 * @returns 0 on success; otherwise error
711
 */
712
713
static int decompress_s_fx_efx_ncob_ecob(const struct cmp_cfg *cfg, struct bit_decoder *dec)
714
24
{
715
24
  size_t i;
716
24
  int err;
717
24
  uint32_t decoded_value;
718
24
  struct decoder_setup setup_exp_flags, setup_fx, setup_ncob, setup_efx, setup_ecob;
719
24
  struct s_fx_efx_ncob_ecob *data_buf = get_collection_data(cfg->dst);
720
24
  const struct s_fx_efx_ncob_ecob *model_buf = get_collection_data_const(cfg->model_buf);
721
24
  struct s_fx_efx_ncob_ecob *up_model_buf;
722
24
  const struct s_fx_efx_ncob_ecob *next_model_p;
723
24
  struct s_fx_efx_ncob_ecob model;
724
725
24
  if (model_mode_is_used(cfg->cmp_mode)) {
726
17
    up_model_buf = get_collection_data(cfg->updated_model_buf);
727
17
    model = model_buf[0];
728
17
    next_model_p = &model_buf[1];
729
17
  } else {
730
7
    up_model_buf = NULL;
731
7
    memset(&model, 0, sizeof(model));
732
7
    next_model_p = data_buf;
733
7
  }
734
735
24
  configure_decoder_setup(&setup_exp_flags, dec, cfg->cmp_mode, cfg->cmp_par_exp_flags,
736
24
        cfg->spill_exp_flags, cfg->round, MAX_USED_BITS.s_exp_flags);
737
24
  configure_decoder_setup(&setup_fx, dec, cfg->cmp_mode, cfg->cmp_par_fx, cfg->spill_fx,
738
24
        cfg->round, MAX_USED_BITS.s_fx);
739
24
  configure_decoder_setup(&setup_ncob, dec, cfg->cmp_mode, cfg->cmp_par_ncob, cfg->spill_ncob,
740
24
        cfg->round, MAX_USED_BITS.s_ncob);
741
24
  configure_decoder_setup(&setup_efx, dec, cfg->cmp_mode, cfg->cmp_par_efx, cfg->spill_efx,
742
24
        cfg->round, MAX_USED_BITS.s_efx);
743
24
  configure_decoder_setup(&setup_ecob, dec, cfg->cmp_mode, cfg->cmp_par_ecob, cfg->spill_ecob,
744
24
        cfg->round, MAX_USED_BITS.s_ecob);
745
746
67
  for (i = 0; ; i++) {
747
67
    err = decode_value(&setup_exp_flags, &decoded_value, model.exp_flags);
748
67
    if (err)
749
2
      break;
750
65
    data_buf[i].exp_flags = (__typeof__(data_buf[i].exp_flags)) decoded_value;
751
752
65
    err = decode_value(&setup_fx, &decoded_value, model.fx);
753
65
    if (err)
754
1
      break;
755
64
    data_buf[i].fx = decoded_value;
756
757
64
    err = decode_value(&setup_ncob, &decoded_value, model.ncob_x);
758
64
    if (err)
759
1
      break;
760
63
    data_buf[i].ncob_x = decoded_value;
761
762
63
    err = decode_value(&setup_ncob, &decoded_value, model.ncob_y);
763
63
    if (err)
764
3
      break;
765
60
    data_buf[i].ncob_y = decoded_value;
766
767
60
    err = decode_value(&setup_efx, &decoded_value, model.efx);
768
60
    if (err)
769
5
      break;
770
55
    data_buf[i].efx = decoded_value;
771
772
55
    err = decode_value(&setup_ecob, &decoded_value, model.ecob_x);
773
55
    if (err)
774
5
      break;
775
50
    data_buf[i].ecob_x = decoded_value;
776
777
50
    err = decode_value(&setup_ecob, &decoded_value, model.ecob_y);
778
50
    if (err)
779
1
      break;
780
49
    data_buf[i].ecob_y = decoded_value;
781
782
49
    if (up_model_buf) {
783
10
      up_model_buf[i].exp_flags = cmp_up_model(data_buf[i].exp_flags, model.exp_flags,
784
10
                 cfg->model_value, setup_exp_flags.lossy_par);
785
10
      up_model_buf[i].fx = cmp_up_model(data_buf[i].fx, model.fx,
786
10
                cfg->model_value, setup_fx.lossy_par);
787
10
      up_model_buf[i].ncob_x = cmp_up_model(data_buf[i].ncob_x, model.ncob_x,
788
10
                    cfg->model_value, setup_ncob.lossy_par);
789
10
      up_model_buf[i].ncob_y = cmp_up_model(data_buf[i].ncob_y, model.ncob_y,
790
10
                    cfg->model_value, setup_ncob.lossy_par);
791
10
      up_model_buf[i].efx = cmp_up_model(data_buf[i].efx, model.efx,
792
10
                 cfg->model_value, setup_efx.lossy_par);
793
10
      up_model_buf[i].ecob_x = cmp_up_model(data_buf[i].ecob_x, model.ecob_x,
794
10
                    cfg->model_value, setup_ecob.lossy_par);
795
10
      up_model_buf[i].ecob_y = cmp_up_model(data_buf[i].ecob_y, model.ecob_y,
796
10
                    cfg->model_value, setup_ecob.lossy_par);
797
10
    }
798
799
49
    if (i >= cfg->samples-1)
800
6
      break;
801
802
43
    model = next_model_p[i];
803
43
  }
804
24
  return err;
805
24
}
806
807
808
/**
809
 * @brief decompress long normal light flux (L_FX) data
810
 *
811
 * @param cfg pointer to the compression configuration structure
812
 * @param dec a pointer to a bit_decoder context
813
 *
814
 * @returns 0 on success; otherwise error
815
 */
816
817
static int decompress_l_fx(const struct cmp_cfg *cfg, struct bit_decoder *dec)
818
25
{
819
25
  size_t i;
820
25
  int err;
821
25
  uint32_t decoded_value;
822
25
  struct decoder_setup setup_exp_flags, setup_fx, setup_fx_var;
823
25
  struct l_fx *data_buf = get_collection_data(cfg->dst);
824
25
  const struct l_fx *model_buf = get_collection_data_const(cfg->model_buf);
825
25
  struct l_fx *up_model_buf;
826
25
  const struct l_fx *next_model_p;
827
25
  struct l_fx model;
828
829
25
  if (model_mode_is_used(cfg->cmp_mode)) {
830
16
    up_model_buf = get_collection_data(cfg->updated_model_buf);
831
16
    model = model_buf[0];
832
16
    next_model_p = &model_buf[1];
833
16
  } else {
834
9
    up_model_buf = NULL;
835
9
    memset(&model, 0, sizeof(model));
836
9
    next_model_p = data_buf;
837
9
  }
838
839
25
  configure_decoder_setup(&setup_exp_flags, dec, cfg->cmp_mode, cfg->cmp_par_exp_flags, cfg->spill_exp_flags,
840
25
        cfg->round, MAX_USED_BITS.l_exp_flags);
841
25
  configure_decoder_setup(&setup_fx, dec, cfg->cmp_mode, cfg->cmp_par_fx, cfg->spill_fx,
842
25
        cfg->round, MAX_USED_BITS.l_fx);
843
25
  configure_decoder_setup(&setup_fx_var, dec, cfg->cmp_mode, cfg->cmp_par_fx_cob_variance, cfg->spill_fx_cob_variance,
844
25
        cfg->round, MAX_USED_BITS.l_fx_cob_variance);
845
846
79
  for (i = 0; ; i++) {
847
79
    err = decode_value(&setup_exp_flags, &decoded_value, model.exp_flags);
848
79
    if (err)
849
4
      break;
850
75
    data_buf[i].exp_flags = decoded_value;
851
852
75
    err = decode_value(&setup_fx, &decoded_value, model.fx);
853
75
    if (err)
854
3
      break;
855
72
    data_buf[i].fx = decoded_value;
856
857
72
    err = decode_value(&setup_fx_var, &decoded_value, model.fx_variance);
858
72
    if (err)
859
6
      break;
860
66
    data_buf[i].fx_variance = decoded_value;
861
862
66
    if (up_model_buf) {
863
18
      up_model_buf[i].exp_flags = cmp_up_model32(data_buf[i].exp_flags, model.exp_flags,
864
18
                   cfg->model_value, setup_exp_flags.lossy_par);
865
18
      up_model_buf[i].fx = cmp_up_model(data_buf[i].fx, model.fx,
866
18
                cfg->model_value, setup_fx.lossy_par);
867
18
      up_model_buf[i].fx_variance = cmp_up_model(data_buf[i].fx_variance, model.fx_variance,
868
18
                   cfg->model_value, setup_fx_var.lossy_par);
869
18
    }
870
871
66
    if (i >= cfg->samples-1)
872
12
      break;
873
874
54
    model = next_model_p[i];
875
54
  }
876
25
  return err;
877
25
}
878
879
880
/**
881
 * @brief decompress L_FX_EFX data
882
 *
883
 * @param cfg pointer to the compression configuration structure
884
 * @param dec a pointer to a bit_decoder context
885
 *
886
 * @returns 0 on success; otherwise error
887
 */
888
889
static int decompress_l_fx_efx(const struct cmp_cfg *cfg, struct bit_decoder *dec)
890
82
{
891
82
  size_t i;
892
82
  int err;
893
82
  uint32_t decoded_value;
894
82
  struct decoder_setup setup_exp_flags, setup_fx, setup_efx, setup_fx_var;
895
82
  struct l_fx_efx *data_buf = get_collection_data(cfg->dst);
896
82
  const struct l_fx_efx *model_buf = get_collection_data_const(cfg->model_buf);
897
82
  struct l_fx_efx *up_model_buf;
898
82
  const struct l_fx_efx *next_model_p;
899
82
  struct l_fx_efx model;
900
901
82
  if (model_mode_is_used(cfg->cmp_mode)) {
902
56
    up_model_buf = get_collection_data(cfg->updated_model_buf);
903
56
    model = model_buf[0];
904
56
    next_model_p = &model_buf[1];
905
56
  } else {
906
26
    up_model_buf = NULL;
907
26
    memset(&model, 0, sizeof(model));
908
26
    next_model_p = data_buf;
909
26
  }
910
911
82
  configure_decoder_setup(&setup_exp_flags, dec, cfg->cmp_mode, cfg->cmp_par_exp_flags, cfg->spill_exp_flags,
912
82
        cfg->round, MAX_USED_BITS.l_exp_flags);
913
82
  configure_decoder_setup(&setup_fx, dec, cfg->cmp_mode, cfg->cmp_par_fx, cfg->spill_fx,
914
82
        cfg->round, MAX_USED_BITS.l_fx);
915
82
  configure_decoder_setup(&setup_efx, dec, cfg->cmp_mode, cfg->cmp_par_efx, cfg->spill_efx,
916
82
        cfg->round, MAX_USED_BITS.l_efx);
917
82
  configure_decoder_setup(&setup_fx_var, dec, cfg->cmp_mode, cfg->cmp_par_fx_cob_variance, cfg->spill_fx_cob_variance,
918
82
        cfg->round, MAX_USED_BITS.l_fx_cob_variance);
919
920
94
  for (i = 0; ; i++) {
921
94
    err = decode_value(&setup_exp_flags, &decoded_value, model.exp_flags);
922
94
    if (err)
923
6
      break;
924
88
    data_buf[i].exp_flags = decoded_value;
925
926
88
    err = decode_value(&setup_fx, &decoded_value, model.fx);
927
88
    if (err)
928
2
      break;
929
86
    data_buf[i].fx = decoded_value;
930
931
86
    err = decode_value(&setup_efx, &decoded_value, model.efx);
932
86
    if (err)
933
5
      break;
934
81
    data_buf[i].efx = decoded_value;
935
936
81
    err = decode_value(&setup_fx_var, &decoded_value, model.fx_variance);
937
81
    if (err)
938
5
      break;
939
76
    data_buf[i].fx_variance = decoded_value;
940
941
76
    if (up_model_buf) {
942
11
      up_model_buf[i].exp_flags = cmp_up_model32(data_buf[i].exp_flags, model.exp_flags,
943
11
                   cfg->model_value, setup_exp_flags.lossy_par);
944
11
      up_model_buf[i].fx = cmp_up_model(data_buf[i].fx, model.fx,
945
11
                cfg->model_value, setup_fx.lossy_par);
946
11
      up_model_buf[i].efx = cmp_up_model(data_buf[i].efx, model.efx,
947
11
                 cfg->model_value, setup_efx.lossy_par);
948
11
      up_model_buf[i].fx_variance = cmp_up_model(data_buf[i].fx_variance, model.fx_variance,
949
11
                   cfg->model_value, setup_fx_var.lossy_par);
950
11
    }
951
952
76
    if (i >= cfg->samples-1)
953
64
      break;
954
955
12
    model = next_model_p[i];
956
12
  }
957
82
  return err;
958
82
}
959
960
961
/**
962
 * @brief decompress L_FX_NCOB data
963
 *
964
 * @param cfg pointer to the compression configuration structure
965
 * @param dec a pointer to a bit_decoder context
966
 *
967
 * @returns 0 on success; otherwise error
968
 */
969
970
static int decompress_l_fx_ncob(const struct cmp_cfg *cfg, struct bit_decoder *dec)
971
23
{
972
23
  size_t i;
973
23
  int err;
974
23
  uint32_t decoded_value;
975
23
  struct decoder_setup setup_exp_flags, setup_fx, setup_ncob,
976
23
           setup_fx_var, setup_cob_var;
977
23
  struct l_fx_ncob *data_buf = get_collection_data(cfg->dst);
978
23
  const struct l_fx_ncob *model_buf = get_collection_data_const(cfg->model_buf);
979
23
  struct l_fx_ncob *up_model_buf;
980
23
  const struct l_fx_ncob *next_model_p;
981
23
  struct l_fx_ncob model;
982
983
23
  if (model_mode_is_used(cfg->cmp_mode)) {
984
19
    up_model_buf = get_collection_data(cfg->updated_model_buf);
985
19
    model = model_buf[0];
986
19
    next_model_p = &model_buf[1];
987
19
  } else {
988
4
    up_model_buf = NULL;
989
4
    memset(&model, 0, sizeof(model));
990
4
    next_model_p = data_buf;
991
4
  }
992
993
23
  configure_decoder_setup(&setup_exp_flags, dec, cfg->cmp_mode, cfg->cmp_par_exp_flags, cfg->spill_exp_flags,
994
23
        cfg->round, MAX_USED_BITS.l_exp_flags);
995
23
  configure_decoder_setup(&setup_fx, dec, cfg->cmp_mode, cfg->cmp_par_fx, cfg->spill_fx,
996
23
        cfg->round, MAX_USED_BITS.l_fx);
997
23
  configure_decoder_setup(&setup_ncob, dec, cfg->cmp_mode, cfg->cmp_par_ncob, cfg->spill_ncob,
998
23
        cfg->round, MAX_USED_BITS.l_ncob);
999
23
  configure_decoder_setup(&setup_fx_var, dec, cfg->cmp_mode, cfg->cmp_par_fx_cob_variance, cfg->spill_fx_cob_variance,
1000
23
        cfg->round, MAX_USED_BITS.l_fx_cob_variance);
1001
23
  configure_decoder_setup(&setup_cob_var, dec, cfg->cmp_mode, cfg->cmp_par_fx_cob_variance, cfg->spill_fx_cob_variance,
1002
23
        cfg->round, MAX_USED_BITS.l_fx_cob_variance);
1003
1004
67
  for (i = 0; ; i++) {
1005
67
    err = decode_value(&setup_exp_flags, &decoded_value, model.exp_flags);
1006
67
    if (err)
1007
3
      break;
1008
64
    data_buf[i].exp_flags = decoded_value;
1009
1010
64
    err = decode_value(&setup_fx, &decoded_value, model.fx);
1011
64
    if (err)
1012
1
      break;
1013
63
    data_buf[i].fx = decoded_value;
1014
1015
63
    err = decode_value(&setup_ncob, &decoded_value, model.ncob_x);
1016
63
    if (err)
1017
1
      break;
1018
62
    data_buf[i].ncob_x = decoded_value;
1019
1020
62
    err = decode_value(&setup_ncob, &decoded_value, model.ncob_y);
1021
62
    if (err)
1022
1
      break;
1023
61
    data_buf[i].ncob_y = decoded_value;
1024
1025
61
    err = decode_value(&setup_fx_var, &decoded_value, model.fx_variance);
1026
61
    if (err)
1027
3
      break;
1028
58
    data_buf[i].fx_variance = decoded_value;
1029
1030
58
    err = decode_value(&setup_cob_var, &decoded_value, model.cob_x_variance);
1031
58
    if (err)
1032
2
      break;
1033
56
    data_buf[i].cob_x_variance = decoded_value;
1034
1035
56
    err = decode_value(&setup_cob_var, &decoded_value, model.cob_y_variance);
1036
56
    if (err)
1037
8
      break;
1038
48
    data_buf[i].cob_y_variance = decoded_value;
1039
1040
48
    if (up_model_buf) {
1041
14
      up_model_buf[i].exp_flags = cmp_up_model32(data_buf[i].exp_flags, model.exp_flags,
1042
14
        cfg->model_value, setup_exp_flags.lossy_par);
1043
14
      up_model_buf[i].fx = cmp_up_model(data_buf[i].fx, model.fx,
1044
14
        cfg->model_value, setup_fx.lossy_par);
1045
14
      up_model_buf[i].ncob_x = cmp_up_model(data_buf[i].ncob_x, model.ncob_x,
1046
14
        cfg->model_value, setup_ncob.lossy_par);
1047
14
      up_model_buf[i].ncob_y = cmp_up_model(data_buf[i].ncob_y, model.ncob_y,
1048
14
        cfg->model_value, setup_ncob.lossy_par);
1049
14
      up_model_buf[i].fx_variance = cmp_up_model(data_buf[i].fx_variance, model.fx_variance,
1050
14
        cfg->model_value, setup_fx_var.lossy_par);
1051
14
      up_model_buf[i].cob_x_variance = cmp_up_model(data_buf[i].cob_x_variance, model.cob_x_variance,
1052
14
        cfg->model_value, setup_cob_var.lossy_par);
1053
14
      up_model_buf[i].cob_y_variance = cmp_up_model(data_buf[i].cob_y_variance, model.cob_y_variance,
1054
14
        cfg->model_value, setup_cob_var.lossy_par);
1055
14
    }
1056
1057
48
    if (i >= cfg->samples-1)
1058
4
      break;
1059
1060
44
    model = next_model_p[i];
1061
44
  }
1062
23
  return err;
1063
23
}
1064
1065
1066
/**
1067
 * @brief decompress L_FX_EFX_NCOB_ECOB data
1068
 *
1069
 * @param cfg pointer to the compression configuration structure
1070
 * @param dec a pointer to a bit_decoder context
1071
 *
1072
 * @returns 0 on success; otherwise error
1073
 */
1074
1075
static int decompress_l_fx_efx_ncob_ecob(const struct cmp_cfg *cfg, struct bit_decoder *dec)
1076
21
{
1077
21
  size_t i;
1078
21
  int err;
1079
21
  uint32_t decoded_value;
1080
21
  struct decoder_setup setup_exp_flags, setup_fx, setup_ncob, setup_efx,
1081
21
           setup_ecob, setup_fx_var, setup_cob_var;
1082
21
  struct l_fx_efx_ncob_ecob *data_buf = get_collection_data(cfg->dst);
1083
21
  const struct l_fx_efx_ncob_ecob *model_buf = get_collection_data_const(cfg->model_buf);
1084
21
  struct l_fx_efx_ncob_ecob *up_model_buf;
1085
21
  const struct l_fx_efx_ncob_ecob *next_model_p;
1086
21
  struct l_fx_efx_ncob_ecob model;
1087
1088
21
  if (model_mode_is_used(cfg->cmp_mode)) {
1089
18
    up_model_buf = get_collection_data(cfg->updated_model_buf);
1090
18
    model = model_buf[0];
1091
18
    next_model_p = &model_buf[1];
1092
18
  } else {
1093
3
    up_model_buf = NULL;
1094
3
    memset(&model, 0, sizeof(model));
1095
3
    next_model_p = data_buf;
1096
3
  }
1097
1098
21
  configure_decoder_setup(&setup_exp_flags, dec, cfg->cmp_mode, cfg->cmp_par_exp_flags, cfg->spill_exp_flags,
1099
21
        cfg->round, MAX_USED_BITS.l_exp_flags);
1100
21
  configure_decoder_setup(&setup_fx, dec, cfg->cmp_mode, cfg->cmp_par_fx, cfg->spill_fx,
1101
21
        cfg->round, MAX_USED_BITS.l_fx);
1102
21
  configure_decoder_setup(&setup_ncob, dec, cfg->cmp_mode, cfg->cmp_par_ncob, cfg->spill_ncob,
1103
21
        cfg->round, MAX_USED_BITS.l_ncob);
1104
21
  configure_decoder_setup(&setup_efx, dec, cfg->cmp_mode, cfg->cmp_par_efx, cfg->spill_efx,
1105
21
        cfg->round, MAX_USED_BITS.l_efx);
1106
21
  configure_decoder_setup(&setup_ecob, dec, cfg->cmp_mode, cfg->cmp_par_ecob, cfg->spill_ecob,
1107
21
        cfg->round, MAX_USED_BITS.l_ecob);
1108
21
  configure_decoder_setup(&setup_fx_var, dec, cfg->cmp_mode, cfg->cmp_par_fx_cob_variance, cfg->spill_fx_cob_variance,
1109
21
        cfg->round, MAX_USED_BITS.l_fx_cob_variance);
1110
21
  configure_decoder_setup(&setup_cob_var, dec, cfg->cmp_mode, cfg->cmp_par_fx_cob_variance, cfg->spill_fx_cob_variance,
1111
21
        cfg->round, MAX_USED_BITS.l_fx_cob_variance);
1112
1113
54
  for (i = 0; ; i++) {
1114
54
    err = decode_value(&setup_exp_flags, &decoded_value, model.exp_flags);
1115
54
    if (err)
1116
2
      break;
1117
52
    data_buf[i].exp_flags = decoded_value;
1118
1119
52
    err = decode_value(&setup_fx, &decoded_value, model.fx);
1120
52
    if (err)
1121
1
      break;
1122
51
    data_buf[i].fx = decoded_value;
1123
1124
51
    err = decode_value(&setup_ncob, &decoded_value, model.ncob_x);
1125
51
    if (err)
1126
1
      break;
1127
50
    data_buf[i].ncob_x = decoded_value;
1128
1129
50
    err = decode_value(&setup_ncob, &decoded_value, model.ncob_y);
1130
50
    if (err)
1131
1
      break;
1132
49
    data_buf[i].ncob_y = decoded_value;
1133
1134
49
    err = decode_value(&setup_efx, &decoded_value, model.efx);
1135
49
    if (err)
1136
2
      break;
1137
47
    data_buf[i].efx = decoded_value;
1138
1139
47
    err = decode_value(&setup_ecob, &decoded_value, model.ecob_x);
1140
47
    if (err)
1141
1
      break;
1142
46
    data_buf[i].ecob_x = decoded_value;
1143
1144
46
    err = decode_value(&setup_ecob, &decoded_value, model.ecob_y);
1145
46
    if (err)
1146
1
      break;
1147
45
    data_buf[i].ecob_y = decoded_value;
1148
1149
45
    err = decode_value(&setup_fx_var, &decoded_value, model.fx_variance);
1150
45
    if (err)
1151
1
      break;
1152
44
    data_buf[i].fx_variance = decoded_value;
1153
1154
44
    err = decode_value(&setup_cob_var, &decoded_value, model.cob_x_variance);
1155
44
    if (err)
1156
1
      break;
1157
43
    data_buf[i].cob_x_variance = decoded_value;
1158
1159
43
    err = decode_value(&setup_cob_var, &decoded_value, model.cob_y_variance);
1160
43
    if (err)
1161
6
      break;
1162
37
    data_buf[i].cob_y_variance = decoded_value;
1163
1164
37
    if (up_model_buf) {
1165
8
      up_model_buf[i].exp_flags = cmp_up_model32(data_buf[i].exp_flags, model.exp_flags,
1166
8
        cfg->model_value, setup_exp_flags.lossy_par);
1167
8
      up_model_buf[i].fx = cmp_up_model(data_buf[i].fx, model.fx,
1168
8
        cfg->model_value, setup_fx.lossy_par);
1169
8
      up_model_buf[i].ncob_x = cmp_up_model(data_buf[i].ncob_x, model.ncob_x,
1170
8
        cfg->model_value, setup_ncob.lossy_par);
1171
8
      up_model_buf[i].ncob_y = cmp_up_model(data_buf[i].ncob_y, model.ncob_y,
1172
8
        cfg->model_value, setup_ncob.lossy_par);
1173
8
      up_model_buf[i].efx = cmp_up_model(data_buf[i].efx, model.efx,
1174
8
        cfg->model_value, setup_efx.lossy_par);
1175
8
      up_model_buf[i].ecob_x = cmp_up_model(data_buf[i].ecob_x, model.ecob_x,
1176
8
        cfg->model_value, setup_ecob.lossy_par);
1177
8
      up_model_buf[i].ecob_y = cmp_up_model(data_buf[i].ecob_y, model.ecob_y,
1178
8
        cfg->model_value, setup_ecob.lossy_par);
1179
8
      up_model_buf[i].fx_variance = cmp_up_model(data_buf[i].fx_variance, model.fx_variance,
1180
8
        cfg->model_value, setup_fx_var.lossy_par);
1181
8
      up_model_buf[i].cob_x_variance = cmp_up_model(data_buf[i].cob_x_variance, model.cob_x_variance,
1182
8
        cfg->model_value, setup_cob_var.lossy_par);
1183
8
      up_model_buf[i].cob_y_variance = cmp_up_model(data_buf[i].cob_y_variance, model.cob_y_variance,
1184
8
        cfg->model_value, setup_cob_var.lossy_par);
1185
8
    }
1186
1187
37
    if (i >= cfg->samples-1)
1188
4
      break;
1189
1190
33
    model = next_model_p[i];
1191
33
  }
1192
21
  return err;
1193
21
}
1194
1195
1196
/**
1197
 * @brief decompress N-CAM and F-CAM offset data
1198
 *
1199
 * @param cfg pointer to the compression configuration structure
1200
 * @param dec a pointer to a bit_decoder context
1201
 *
1202
 * @returns 0 on success; otherwise error
1203
 */
1204
1205
static int decompress_offset(const struct cmp_cfg *cfg, struct bit_decoder *dec)
1206
32
{
1207
32
  size_t i;
1208
32
  int err;
1209
32
  uint32_t decoded_value;
1210
32
  struct decoder_setup setup_mean, setup_var;
1211
32
  struct offset *data_buf = get_collection_data(cfg->dst);
1212
32
  const struct offset *model_buf = get_collection_data_const(cfg->model_buf);
1213
32
  struct offset *up_model_buf;
1214
32
  const struct offset *next_model_p;
1215
32
  struct offset model;
1216
1217
32
  if (model_mode_is_used(cfg->cmp_mode)) {
1218
23
    up_model_buf = get_collection_data(cfg->updated_model_buf);
1219
23
    model = model_buf[0];
1220
23
    next_model_p = &model_buf[1];
1221
23
  } else {
1222
9
    up_model_buf = NULL;
1223
9
    memset(&model, 0, sizeof(model));
1224
9
    next_model_p = data_buf;
1225
9
  }
1226
1227
32
  {
1228
32
    unsigned int mean_bits_used, variance_bits_used;
1229
1230
32
    switch (cfg->data_type) {
1231
24
    case DATA_TYPE_F_CAM_OFFSET:
1232
24
      mean_bits_used = MAX_USED_BITS.fc_offset_mean;
1233
24
      variance_bits_used = MAX_USED_BITS.fc_offset_variance;
1234
24
      break;
1235
8
    case DATA_TYPE_OFFSET:
1236
8
    default:
1237
8
      mean_bits_used = MAX_USED_BITS.nc_offset_mean;
1238
8
      variance_bits_used = MAX_USED_BITS.nc_offset_variance;
1239
8
      break;
1240
32
    }
1241
32
    configure_decoder_setup(&setup_mean, dec, cfg->cmp_mode, cfg->cmp_par_offset_mean, cfg->spill_offset_mean,
1242
32
          cfg->round, mean_bits_used);
1243
1244
32
    configure_decoder_setup(&setup_var, dec, cfg->cmp_mode, cfg->cmp_par_offset_variance, cfg->spill_offset_variance,
1245
32
          cfg->round, variance_bits_used);
1246
1247
32
  }
1248
1249
98
  for (i = 0; ; i++) {
1250
98
    err = decode_value(&setup_mean, &decoded_value, model.mean);
1251
98
    if (err)
1252
8
      break;
1253
90
    data_buf[i].mean = decoded_value;
1254
1255
90
    err = decode_value(&setup_var, &decoded_value, model.variance);
1256
90
    if (err)
1257
5
      break;
1258
85
    data_buf[i].variance = decoded_value;
1259
1260
85
    if (up_model_buf) {
1261
22
      up_model_buf[i].mean = cmp_up_model(data_buf[i].mean,
1262
22
        model.mean, cfg->model_value, setup_mean.lossy_par);
1263
22
      up_model_buf[i].variance = cmp_up_model(data_buf[i].variance,
1264
22
        model.variance, cfg->model_value, setup_var.lossy_par);
1265
22
    }
1266
1267
85
    if (i >= cfg->samples-1)
1268
19
      break;
1269
1270
66
    model = next_model_p[i];
1271
66
  }
1272
32
  return err;
1273
32
}
1274
1275
1276
/**
1277
 * @brief decompress N-CAM and F-CAM background data
1278
 *
1279
 * @param cfg pointer to the compression configuration structure
1280
 * @param dec a pointer to a bit_decoder context
1281
 *
1282
 * @returns 0 on success; otherwise error
1283
 */
1284
1285
static int decompress_background(const struct cmp_cfg *cfg, struct bit_decoder *dec)
1286
32
{
1287
32
  size_t i;
1288
32
  int err;
1289
32
  uint32_t decoded_value;
1290
32
  struct decoder_setup setup_mean, setup_var, setup_pix;
1291
32
  struct background *data_buf = get_collection_data(cfg->dst);
1292
32
  const struct background *model_buf = get_collection_data_const(cfg->model_buf);
1293
32
  struct background *up_model_buf;
1294
32
  const struct background *next_model_p;
1295
32
  struct background model;
1296
1297
32
  if (model_mode_is_used(cfg->cmp_mode)) {
1298
28
    up_model_buf = get_collection_data(cfg->updated_model_buf);
1299
28
    model = model_buf[0];
1300
28
    next_model_p = &model_buf[1];
1301
28
  } else {
1302
4
    up_model_buf = NULL;
1303
4
    memset(&model, 0, sizeof(model));
1304
4
    next_model_p = data_buf;
1305
4
  }
1306
32
  {
1307
32
    unsigned int mean_used_bits, variance_used_bits, outlier_pixels_used_bits;
1308
1309
32
    switch (cfg->data_type) {
1310
17
    case DATA_TYPE_F_CAM_BACKGROUND:
1311
17
      mean_used_bits = MAX_USED_BITS.fc_background_mean;
1312
17
      variance_used_bits = MAX_USED_BITS.fc_background_variance;
1313
17
      outlier_pixels_used_bits = MAX_USED_BITS.fc_background_outlier_pixels;
1314
17
      break;
1315
15
    case DATA_TYPE_BACKGROUND:
1316
15
    default:
1317
15
      mean_used_bits = MAX_USED_BITS.nc_background_mean;
1318
15
      variance_used_bits = MAX_USED_BITS.nc_background_variance;
1319
15
      outlier_pixels_used_bits = MAX_USED_BITS.nc_background_outlier_pixels;
1320
15
      break;
1321
32
    }
1322
1323
32
    configure_decoder_setup(&setup_mean, dec, cfg->cmp_mode, cfg->cmp_par_background_mean, cfg->spill_background_mean,
1324
32
          cfg->round, mean_used_bits);
1325
1326
32
    configure_decoder_setup(&setup_var, dec, cfg->cmp_mode, cfg->cmp_par_background_variance, cfg->spill_background_variance,
1327
32
          cfg->round, variance_used_bits);
1328
1329
32
    configure_decoder_setup(&setup_pix, dec, cfg->cmp_mode, cfg->cmp_par_background_pixels_error, cfg->spill_background_pixels_error,
1330
32
          cfg->round, outlier_pixels_used_bits);
1331
1332
32
  }
1333
1334
124
  for (i = 0; ; i++) {
1335
124
    err = decode_value(&setup_mean, &decoded_value, model.mean);
1336
124
    if (err)
1337
16
      break;
1338
108
    data_buf[i].mean = decoded_value;
1339
1340
108
    err = decode_value(&setup_var, &decoded_value, model.variance);
1341
108
    if (err)
1342
5
      break;
1343
103
    data_buf[i].variance = decoded_value;
1344
1345
103
    err = decode_value(&setup_pix, &decoded_value, model.outlier_pixels);
1346
103
    if (err)
1347
3
      break;
1348
100
    data_buf[i].outlier_pixels = (__typeof__(data_buf[i].outlier_pixels))decoded_value;
1349
1350
100
    if (up_model_buf) {
1351
21
      up_model_buf[i].mean = cmp_up_model(data_buf[i].mean,
1352
21
        model.mean, cfg->model_value, setup_mean.lossy_par);
1353
21
      up_model_buf[i].variance = cmp_up_model(data_buf[i].variance,
1354
21
        model.variance, cfg->model_value, setup_var.lossy_par);
1355
21
      up_model_buf[i].outlier_pixels = cmp_up_model(data_buf[i].outlier_pixels,
1356
21
        model.outlier_pixels, cfg->model_value, setup_pix.lossy_par);
1357
21
    }
1358
1359
100
    if (i >= cfg->samples-1)
1360
8
      break;
1361
1362
92
    model = next_model_p[i];
1363
92
  }
1364
32
  return err;
1365
32
}
1366
1367
1368
/**
1369
 * @brief decompress N-CAM smearing data
1370
 *
1371
 * @param cfg pointer to the compression configuration structure
1372
 * @param dec a pointer to a bit_decoder context
1373
 *
1374
 * @returns 0 on success; otherwise error
1375
 */
1376
1377
static int decompress_smearing(const struct cmp_cfg *cfg, struct bit_decoder *dec)
1378
29
{
1379
29
  size_t i;
1380
29
  int err;
1381
29
  uint32_t decoded_value;
1382
29
  struct decoder_setup setup_mean, setup_var, setup_pix;
1383
29
  struct smearing *data_buf = get_collection_data(cfg->dst);
1384
29
  const struct smearing *model_buf = get_collection_data_const(cfg->model_buf);
1385
29
  struct smearing *up_model_buf;
1386
29
  const struct smearing *next_model_p;
1387
29
  struct smearing model;
1388
1389
29
  if (model_mode_is_used(cfg->cmp_mode)) {
1390
15
    up_model_buf = get_collection_data(cfg->updated_model_buf);
1391
15
    model = model_buf[0];
1392
15
    next_model_p = &model_buf[1];
1393
15
  } else {
1394
14
    up_model_buf = NULL;
1395
14
    memset(&model, 0, sizeof(model));
1396
14
    next_model_p = data_buf;
1397
14
  }
1398
1399
29
  configure_decoder_setup(&setup_mean, dec, cfg->cmp_mode, cfg->cmp_par_smearing_mean, cfg->spill_smearing_mean,
1400
29
        cfg->round, MAX_USED_BITS.smearing_mean);
1401
29
  configure_decoder_setup(&setup_var, dec, cfg->cmp_mode, cfg->cmp_par_smearing_variance, cfg->spill_smearing_variance,
1402
29
        cfg->round, MAX_USED_BITS.smearing_variance_mean);
1403
29
  configure_decoder_setup(&setup_pix, dec, cfg->cmp_mode, cfg->cmp_par_smearing_pixels_error, cfg->spill_smearing_pixels_error,
1404
29
        cfg->round, MAX_USED_BITS.smearing_outlier_pixels);
1405
1406
76
  for (i = 0; ; i++) {
1407
76
    err = decode_value(&setup_mean, &decoded_value, model.mean);
1408
76
    if (err)
1409
5
      break;
1410
71
    data_buf[i].mean = decoded_value;
1411
1412
71
    err = decode_value(&setup_var, &decoded_value, model.variance_mean);
1413
71
    if (err)
1414
5
      break;
1415
66
    data_buf[i].variance_mean = (__typeof__(data_buf[i].variance_mean))decoded_value;
1416
1417
66
    err = decode_value(&setup_pix, &decoded_value, model.outlier_pixels);
1418
66
    if (err)
1419
5
      break;
1420
61
    data_buf[i].outlier_pixels = (__typeof__(data_buf[i].outlier_pixels))decoded_value;
1421
1422
61
    if (up_model_buf) {
1423
18
      up_model_buf[i].mean = cmp_up_model(data_buf[i].mean,
1424
18
        model.mean, cfg->model_value, setup_mean.lossy_par);
1425
18
      up_model_buf[i].variance_mean = cmp_up_model(data_buf[i].variance_mean,
1426
18
        model.variance_mean, cfg->model_value, setup_var.lossy_par);
1427
18
      up_model_buf[i].outlier_pixels = cmp_up_model(data_buf[i].outlier_pixels,
1428
18
        model.outlier_pixels, cfg->model_value, setup_pix.lossy_par);
1429
18
    }
1430
1431
61
    if (i >= cfg->samples-1)
1432
14
      break;
1433
1434
47
    model = next_model_p[i];
1435
47
  }
1436
29
  return err;
1437
29
}
1438
1439
1440
/**
1441
 * @brief Decompresses the collection header.
1442
 *
1443
 * @param cfg pointer to the compression configuration structure
1444
 *
1445
 * @note the collection header is not truly compressed; it is simply copied into
1446
 *  the compressed data.
1447
 *
1448
 * @return The size of the decompressed collection header on success,
1449
 *         or -1 if the buffer length is insufficient
1450
 */
1451
1452
static int decompress_collection_hdr(const struct cmp_cfg *cfg)
1453
394
{
1454
394
  if (cfg->stream_size < COLLECTION_HDR_SIZE)
1455
0
    return -1;
1456
1457
394
  if (cfg->src) {
1458
394
    if (cfg->dst)
1459
394
      memcpy(cfg->dst, cfg->src, COLLECTION_HDR_SIZE);
1460
1461
394
    if (model_mode_is_used(cfg->cmp_mode) && cfg->updated_model_buf)
1462
104
      memcpy(cfg->updated_model_buf, cfg->src, COLLECTION_HDR_SIZE);
1463
394
  }
1464
394
  return COLLECTION_HDR_SIZE;
1465
394
}
1466
1467
1468
/**
1469
 * @brief decompress the data based on a compression configuration
1470
 *
1471
 * @param cfg   pointer to a compression configuration
1472
 * @param decmp_type  type of decompression: ICU chunk or RDCU decompression
1473
 *
1474
 * @note cfg->stream_size is measured in bytes
1475
 *
1476
 * @returns the size of the decompressed data on success; returns negative on failure
1477
 */
1478
1479
static int decompressed_data_internal(const struct cmp_cfg *cfg, enum decmp_type decmp_type)
1480
2.40k
{
1481
2.40k
  int err;
1482
2.40k
  uint32_t data_size;
1483
1484
2.40k
  assert(decmp_type == ICU_DECOMRESSION || decmp_type == RDCU_DECOMPRESSION);
1485
1486
2.40k
  if (!cfg)
1487
0
    return -1;
1488
1489
2.40k
  if (!cfg->src)
1490
0
    return -1;
1491
1492
2.40k
  if (cmp_cfg_gen_par_is_invalid(cfg))
1493
0
    return -1;
1494
1495
2.40k
  if (cmp_imagette_data_type_is_used(cfg->data_type)) {
1496
384
    if (cmp_cfg_imagette_is_invalid(cfg))
1497
5
      return -1;
1498
2.02k
  } else if (cmp_fx_cob_data_type_is_used(cfg->data_type)) {
1499
1.40k
    if (cmp_cfg_fx_cob_is_invalid(cfg))
1500
15
      return -1;
1501
1.40k
  } else if (cmp_aux_data_type_is_used(cfg->data_type)) {
1502
617
    if (cmp_cfg_aux_is_invalid(cfg))
1503
13
      return -1;
1504
617
  } else {
1505
0
    return -1;
1506
0
  }
1507
1508
2.37k
  if (model_mode_is_used(cfg->cmp_mode))
1509
771
    if (!cfg->model_buf) /* we need a model for model compression */
1510
2
      return -1;
1511
1512
2.36k
  data_size = cfg->samples * (uint32_t)size_of_a_sample(cfg->data_type);
1513
2.36k
  if (decmp_type == ICU_DECOMRESSION)
1514
2.14k
    data_size += COLLECTION_HDR_SIZE;
1515
1516
2.36k
  if (cfg->cmp_mode == CMP_MODE_RAW) {
1517
1.16k
    if (cfg->dst) {
1518
461
      memcpy(cfg->dst, cfg->src, data_size);
1519
461
      switch (decmp_type) {
1520
461
      case ICU_DECOMRESSION:
1521
461
        if (be_to_cpu_chunk(cfg->dst, data_size))
1522
0
          return -1;
1523
461
        break;
1524
461
      case RDCU_DECOMPRESSION:
1525
0
        if (be_to_cpu_data_type(cfg->dst, data_size,
1526
0
              cfg->data_type))
1527
0
          return -1;
1528
0
        break;
1529
461
      }
1530
461
    }
1531
1.16k
    err = 0;
1532
1533
1.20k
  } else {
1534
1.20k
    struct bit_decoder dec;
1535
1.20k
    int hdr_size = 0;
1536
1537
1.20k
    if (!cfg->dst)
1538
694
      return (int)data_size;
1539
1540
506
    if (decmp_type == ICU_DECOMRESSION) {
1541
394
      hdr_size = decompress_collection_hdr(cfg);
1542
394
      if (hdr_size < 0)
1543
0
        return -1;
1544
394
    }
1545
1546
506
    bit_init_decoder(&dec, (const uint8_t *)cfg->src+hdr_size,
1547
506
         cfg->stream_size-(uint32_t)hdr_size);
1548
1549
506
    switch (cfg->data_type) {
1550
42
    case DATA_TYPE_IMAGETTE:
1551
48
    case DATA_TYPE_IMAGETTE_ADAPTIVE:
1552
55
    case DATA_TYPE_SAT_IMAGETTE:
1553
71
    case DATA_TYPE_SAT_IMAGETTE_ADAPTIVE:
1554
116
    case DATA_TYPE_F_CAM_IMAGETTE:
1555
117
    case DATA_TYPE_F_CAM_IMAGETTE_ADAPTIVE:
1556
117
      err = decompress_imagette(cfg, &dec, decmp_type);
1557
117
      break;
1558
49
    case DATA_TYPE_S_FX:
1559
49
      err = decompress_s_fx(cfg, &dec);
1560
49
      break;
1561
49
    case DATA_TYPE_S_FX_EFX:
1562
49
      err = decompress_s_fx_efx(cfg, &dec);
1563
49
      break;
1564
22
    case DATA_TYPE_S_FX_NCOB:
1565
22
      err = decompress_s_fx_ncob(cfg, &dec);
1566
22
      break;
1567
24
    case DATA_TYPE_S_FX_EFX_NCOB_ECOB:
1568
24
      err = decompress_s_fx_efx_ncob_ecob(cfg, &dec);
1569
24
      break;
1570
1571
25
    case DATA_TYPE_L_FX:
1572
25
      err = decompress_l_fx(cfg, &dec);
1573
25
      break;
1574
82
    case DATA_TYPE_L_FX_EFX:
1575
82
      err = decompress_l_fx_efx(cfg, &dec);
1576
82
      break;
1577
23
    case DATA_TYPE_L_FX_NCOB:
1578
23
      err = decompress_l_fx_ncob(cfg, &dec);
1579
23
      break;
1580
21
    case DATA_TYPE_L_FX_EFX_NCOB_ECOB:
1581
21
      err = decompress_l_fx_efx_ncob_ecob(cfg, &dec);
1582
21
      break;
1583
1584
8
    case DATA_TYPE_OFFSET:
1585
32
    case DATA_TYPE_F_CAM_OFFSET:
1586
32
      err = decompress_offset(cfg, &dec);
1587
32
      break;
1588
15
    case DATA_TYPE_BACKGROUND:
1589
32
    case DATA_TYPE_F_CAM_BACKGROUND:
1590
32
      err = decompress_background(cfg, &dec);
1591
32
      break;
1592
29
    case DATA_TYPE_SMEARING:
1593
29
      err = decompress_smearing(cfg, &dec);
1594
29
      break;
1595
1596
1
    case DATA_TYPE_F_FX:
1597
1
    case DATA_TYPE_F_FX_EFX:
1598
1
    case DATA_TYPE_F_FX_NCOB:
1599
1
    case DATA_TYPE_F_FX_EFX_NCOB_ECOB:
1600
1
    case DATA_TYPE_CHUNK:
1601
1
    case DATA_TYPE_UNKNOWN:
1602
1
    default:
1603
1
      err = -1;
1604
1
      debug_print("Error: Compressed data type not supported.");
1605
1
      break;
1606
506
    }
1607
1608
506
    switch (bit_refill(&dec)) {
1609
137
    case BIT_OVERFLOW:
1610
137
      if (dec.cursor == dec.limit_ptr)
1611
134
        debug_print("Error: The end of the compressed bit stream has been exceeded. Please check that the compression parameters match those used to compress the data and that the compressed data are not corrupted.");
1612
3
      else
1613
3
        debug_print("Error: Data consistency check failed. %s", please_check_str);
1614
137
      break;
1615
250
    case BIT_END_OF_BUFFER:
1616
      /* check if the non-consumed bits are zero */
1617
250
      { unsigned int bits_not_read = sizeof(dec.bit_container)*8 - dec.bits_consumed;
1618
1619
250
        if (bits_not_read > 57) /* cannot read more than 57 bits */
1620
31
          bits_not_read = 57;
1621
1622
250
        if (bit_read_bits(&dec, bits_not_read) == 0)
1623
72
          break;
1624
250
      } /* fall through */
1625
271
    case BIT_UNFINISHED:
1626
271
      debug_print("Warning: Not all compressed data are processed.");
1627
271
      break;
1628
506
    }
1629
506
  }
1630
1.67k
  if (err)
1631
251
    return -1;
1632
1633
1.42k
  return (int)data_size;
1634
1.67k
}
1635
1636
1637
/**
1638
 * @brief read in an imagette compression entity header to a
1639
 *  compression configuration
1640
 *
1641
 * @param ent pointer to a compression entity
1642
 * @param cfg pointer to a compression configuration
1643
 *
1644
 * @returns 0 on success; otherwise error
1645
 */
1646
1647
static int cmp_ent_read_header(const struct cmp_entity *ent, struct cmp_cfg *cfg)
1648
1.24k
{
1649
1.24k
  uint32_t org_size;
1650
1651
1.24k
  if (!cfg)
1652
0
    return -1;
1653
1654
1.24k
  cfg->data_type = cmp_ent_get_data_type(ent);
1655
  /* the compression entity data type field only supports imagette or chunk data types */
1656
1.24k
  if (cfg->data_type != DATA_TYPE_CHUNK && !rdcu_supported_data_type_is_used(cfg->data_type)) {
1657
3
    debug_print("Error: Compression entity data type not supported.");
1658
3
    return -1;
1659
3
  }
1660
1661
1.24k
  cfg->cmp_mode = cmp_ent_get_cmp_mode(ent);
1662
1.24k
  if (cmp_ent_get_data_type_raw_bit(ent) != (cfg->cmp_mode == CMP_MODE_RAW)) {
1663
1
    debug_print("Error: The entity's raw data bit does not match up with the compression mode.");
1664
1
    return -1;
1665
1
  }
1666
1.24k
  cfg->model_value = cmp_ent_get_model_value(ent);
1667
1.24k
  cfg->round = cmp_ent_get_lossy_cmp_par(ent);
1668
1.24k
  cfg->stream_size = cmp_ent_get_cmp_data_size(ent);
1669
1670
1.24k
  if (cmp_cfg_gen_par_is_invalid(cfg))
1671
3
    return -1;
1672
1673
1.24k
  org_size = cmp_ent_get_original_size(ent);
1674
1.24k
  if (cfg->data_type == DATA_TYPE_CHUNK) {
1675
980
    cfg->samples = 0;
1676
980
    if ((cfg->stream_size < (COLLECTION_HDR_SIZE + CMP_COLLECTION_FILD_SIZE) && (cfg->cmp_mode != CMP_MODE_RAW)) ||
1677
980
        (cfg->stream_size < COLLECTION_HDR_SIZE && (cfg->cmp_mode == CMP_MODE_RAW))) {
1678
2
      debug_print("Error: The compressed data size in the compression header is smaller than a collection header.");
1679
2
      return -1;
1680
2
    }
1681
978
    if (org_size < COLLECTION_HDR_SIZE) {
1682
1
      debug_print("Error: The original decompressed data size in the compression header is smaller than the minimum size.");
1683
1
      return -1;
1684
1
    }
1685
978
  } else {
1686
260
    if (org_size % sizeof(uint16_t)) {
1687
1
      debug_print("Error: The original size of an imagette product type in the compression header must be a multiple of 2.");
1688
1
      cfg->samples = 0;
1689
1
      return -1;
1690
1
    }
1691
259
    cfg->samples = org_size/sizeof(uint16_t);
1692
259
  }
1693
1694
1.23k
  cfg->src = cmp_ent_get_data_buf_const(ent);
1695
1696
1.23k
  if (cmp_ent_get_reserved(ent))
1697
943
    debug_print("Warning: The reserved field in the compressed header should be zero.");
1698
1699
1.23k
  if (cfg->cmp_mode == CMP_MODE_RAW) {
1700
136
    if (cmp_ent_get_original_size(ent) != cmp_ent_get_cmp_data_size(ent)) {
1701
2
      debug_print("Error: The compressed data size and the decompressed original data size in the compression header should be the same in raw mode.");
1702
2
      return -1;
1703
2
    }
1704
    /* no specific header is used for raw data, so we are done */
1705
134
    return 0;
1706
136
  }
1707
1708
1.10k
  if (cmp_ent_cal_hdr_size(cfg->data_type, cfg->cmp_mode == CMP_MODE_RAW)
1709
1.10k
      > cmp_ent_get_size(ent)) {
1710
6
    debug_print("Error: The compression entity size is smaller than the minimum allowed size.");
1711
6
    return -1;
1712
6
  }
1713
1714
1.09k
  switch (cfg->data_type) {
1715
13
  case DATA_TYPE_IMAGETTE_ADAPTIVE:
1716
46
  case DATA_TYPE_SAT_IMAGETTE_ADAPTIVE:
1717
49
  case DATA_TYPE_F_CAM_IMAGETTE_ADAPTIVE:
1718
    /* we do not read in adaptive parameters */
1719
130
  case DATA_TYPE_IMAGETTE:
1720
141
  case DATA_TYPE_SAT_IMAGETTE:
1721
232
  case DATA_TYPE_F_CAM_IMAGETTE:
1722
232
    cfg->cmp_par_imagette = cmp_ent_get_ima_golomb_par(ent);
1723
232
    cfg->spill_imagette = cmp_ent_get_ima_spill(ent);
1724
232
    break;
1725
0
  case DATA_TYPE_OFFSET:
1726
0
  case DATA_TYPE_F_CAM_OFFSET:
1727
0
  case DATA_TYPE_BACKGROUND:
1728
0
  case DATA_TYPE_F_CAM_BACKGROUND:
1729
0
  case DATA_TYPE_SMEARING:
1730
0
  case DATA_TYPE_S_FX:
1731
0
  case DATA_TYPE_S_FX_EFX:
1732
0
  case DATA_TYPE_S_FX_NCOB:
1733
0
  case DATA_TYPE_S_FX_EFX_NCOB_ECOB:
1734
0
  case DATA_TYPE_L_FX:
1735
0
  case DATA_TYPE_L_FX_EFX:
1736
0
  case DATA_TYPE_L_FX_NCOB:
1737
0
  case DATA_TYPE_L_FX_EFX_NCOB_ECOB:
1738
0
  case DATA_TYPE_F_FX:
1739
0
  case DATA_TYPE_F_FX_EFX:
1740
0
  case DATA_TYPE_F_FX_NCOB:
1741
0
  case DATA_TYPE_F_FX_EFX_NCOB_ECOB:
1742
862
  case DATA_TYPE_CHUNK:
1743
862
    cfg->cmp_par_exp_flags = cmp_ent_get_non_ima_cmp_par1(ent);
1744
862
    cfg->spill_exp_flags = cmp_ent_get_non_ima_spill1(ent);
1745
862
    cfg->cmp_par_fx = cmp_ent_get_non_ima_cmp_par2(ent);
1746
862
    cfg->spill_fx = cmp_ent_get_non_ima_spill2(ent);
1747
862
    cfg->cmp_par_ncob = cmp_ent_get_non_ima_cmp_par3(ent);
1748
862
    cfg->spill_ncob = cmp_ent_get_non_ima_spill3(ent);
1749
862
    cfg->cmp_par_efx = cmp_ent_get_non_ima_cmp_par4(ent);
1750
862
    cfg->spill_efx = cmp_ent_get_non_ima_spill4(ent);
1751
862
    cfg->cmp_par_ecob = cmp_ent_get_non_ima_cmp_par5(ent);
1752
862
    cfg->spill_ecob = cmp_ent_get_non_ima_spill5(ent);
1753
862
    cfg->cmp_par_fx_cob_variance = cmp_ent_get_non_ima_cmp_par6(ent);
1754
862
    cfg->spill_fx_cob_variance = cmp_ent_get_non_ima_spill6(ent);
1755
862
    break;
1756
  /* LCOV_EXCL_START */
1757
0
  case DATA_TYPE_UNKNOWN:
1758
0
  default:
1759
0
    return -1;
1760
  /* LCOV_EXCL_STOP */
1761
1.09k
  }
1762
1763
1.09k
  return 0;
1764
1.09k
}
1765
1766
1767
/**
1768
 * @brief Get the size of the compressed collection data
1769
 *
1770
 * @param cmp_col pointer to a compressed collection
1771
 *
1772
 * @return The size of the compressed collection data in bytes
1773
 */
1774
1775
static uint16_t get_cmp_collection_data_length(const uint8_t *cmp_col)
1776
8.94k
{
1777
8.94k
  uint16_t cmp_data_size;
1778
  /* If a non-raw mode is used to compress all collections, a
1779
   * 2-byte big-endian field with the size of the compressed data
1781
   * is prefixed (without the size of the field itself and without
1781
   * the size of the collection header). This is followed by a
1782
   * collection header, followed by the compressed data.
1783
   * |---------------------| - cmp_col
1784
   * |compressed collection|
1785
   * |      data size      | 2 bytes
1786
   * |---------------------|-
1787
   * |   COLLECTION HDR    |
1788
   * |                     | 12 bytes
1789
   * |---------------------|-
1790
   * |    compressed data  | (variable) data size
1791
   * |         *-*-*       |
1792
   * |         *-*-*       |
1793
   * |---------------------|- next cmp_col
1794
   * Fields are not drawn to scale
1795
   */
1796
1797
8.94k
  memcpy(&cmp_data_size, cmp_col, sizeof(cmp_data_size));
1798
8.94k
  be16_to_cpus(&cmp_data_size);
1799
1800
8.94k
  return cmp_data_size;
1801
8.94k
}
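As a worked example of the layout sketched in the comment above (using the 2-byte size prefix and the 12-byte collection header shown in the diagram): if the big-endian prefix of a compressed collection decodes to 40, that collection occupies 2 + 12 + 40 = 54 bytes of the compressed stream, and the next compressed collection, if any, starts 54 bytes later. This stride is exactly what get_cmp_collection_size() below returns.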
1802
1803
1804
/**
1805
 * @brief get the total size of the compressed collection
1806
 *
1807
 * This function returns the total size of the compressed collection in bytes,
1808
 * including the size of the compressed size field itself, the collection header,
1809
 * and the compressed collection data.
1810
 *
1811
 * @param cmp_col pointer to a compressed collection
1812
 *
1813
 * @return The total size of the compressed collection in bytes
1814
 */
1815
1816
static uint32_t get_cmp_collection_size(const uint8_t *cmp_col)
1817
6.75k
{
1818
6.75k
  return CMP_COLLECTION_FILD_SIZE + COLLECTION_HDR_SIZE
1819
6.75k
    + get_cmp_collection_data_length(cmp_col);
1820
6.75k
}
1821
1822
1823
/**
1824
 * @brief get the number of compressed collections in a compression entity
1825
 *
1826
 * This function returns the number of compressed collections in a compression
1827
 * entity, by iterating over the compressed collection data
1828
 *
1829
 * @param ent  pointer to the compression entity
1830
 *
1831
 * @return the number of compressed collections in the compressed entity, or -1
1832
 *  on error
1833
 */
1834
1835
static int get_num_of_chunks(const struct cmp_entity *ent)
1836
862
{
1837
862
  const uint8_t *cmp_data_p = cmp_ent_get_data_buf_const(ent);
1838
862
  long const cmp_data_size = cmp_ent_get_cmp_data_size(ent);
1839
862
  int n = 0;
1840
862
  const uint8_t *p = cmp_data_p;
1841
  /* highest plausible address of compressed collection */
1842
862
  const uint8_t *limit_ptr = cmp_data_p + cmp_data_size - COLLECTION_HDR_SIZE;
1843
1844
3.14k
  while (p < limit_ptr) {
1845
2.28k
    p += get_cmp_collection_size(p);
1846
2.28k
    n++;
1847
2.28k
  }
1848
1849
862
  if (p-cmp_data_p != cmp_data_size) {
1850
2
    debug_print("Error: The sum of the compressed collection does not match the size of the data in the compression header.");
1851
2
    return -1;
1852
2
  }
1853
860
  return n;
1854
862
}
1855
1856
1857
/**
1858
 * @brief Parse n'th compressed collection and set configuration parameters
1859
 *  for decompressing it
1860
 *
1861
 * @param cmp_col   pointer to a compressed collection to parse
1862
 * @param n     the number of the compressed collection to
1863
 *        parse, starting from 0
1864
 * @param cfg     pointer to the configuration structure
1865
 * @param coll_uncompressed pointer to store whether the collection is
1866
 *        uncompressed or not
1867
 * @param decmp_size    size of the original decompressed data
1868
 *
1869
 * @return the byte offset where to put the uncompressed result in the
1870
 *  decompressed data, or -1 on error.
1871
 */
1872
1873
static long parse_cmp_collection(const uint8_t *cmp_col, int n, struct cmp_cfg *cfg,
1874
         int *coll_uncompressed, int decmp_size)
1875
2.19k
{
1876
2.19k
  int i;
1877
2.19k
  long decmp_pos = 0; /* position where to put the uncompressed result */
1878
  /* pointer to the collection header */
1879
2.19k
  const struct collection_hdr *col_hdr =
1880
2.19k
    (const struct collection_hdr *)(cmp_col + CMP_COLLECTION_FILD_SIZE);
1881
2.19k
  uint32_t cmp_data_size; /* size of the compressed data in the collection (not including the header) */
1882
2.19k
  uint16_t original_col_size; /* size of the decompressed collection data (not including the header) */
1883
2.19k
  size_t sample_size;
1884
1885
  /* get to the collection we want to decompress */
1886
6.66k
  for (i = 0; i < n; i++) {
1887
4.47k
    decmp_pos += cmp_col_get_size(col_hdr);
1888
4.47k
    cmp_col += get_cmp_collection_size(cmp_col);
1889
4.47k
    col_hdr = (const struct collection_hdr *)(cmp_col + CMP_COLLECTION_FILD_SIZE);
1890
4.47k
  }
1891
1892
2.19k
  cmp_data_size = get_cmp_collection_data_length(cmp_col);
1893
2.19k
  original_col_size = cmp_col_get_data_length(col_hdr);
1894
1895
2.19k
  if (cmp_data_size > original_col_size) {
1896
1
    debug_print("Error: Collection %i, the size of the compressed collection is larger than that of the uncompressed collection.", i);
1897
1
    return -1;
1898
1
  }
1899
1900
  /* if the compressed data size == original_col_size, the collection data
1901
   * was put uncompressed into the bitstream */
1902
2.19k
  if (cmp_data_size == original_col_size)
1903
1.17k
    *coll_uncompressed = 1;
1904
1.01k
  else
1905
1.01k
    *coll_uncompressed = 0;
1906
1907
2.19k
  cfg->src = col_hdr;
1908
2.19k
  cfg->stream_size = cmp_data_size + COLLECTION_HDR_SIZE;
1909
1910
2.19k
  cfg->data_type = convert_subservice_to_cmp_data_type(cmp_col_get_subservice(col_hdr));
1911
2.19k
  sample_size = size_of_a_sample(cfg->data_type);
1912
2.19k
  if (!sample_size)
1913
11
    return -1;
1914
1915
2.18k
  if (original_col_size % sample_size) {
1916
7
    debug_print("Error: The size of the collection is not a multiple of a collection entry.");
1917
7
    return -1;
1918
7
  }
1919
2.17k
  cfg->samples = original_col_size / sample_size;
1920
1921
2.17k
  if (decmp_pos + original_col_size + COLLECTION_HDR_SIZE > decmp_size) {
1922
1
    debug_print("Error: The compressed data and the original size do not match.");
1923
1
    return -1;
1924
1
  }
1925
1926
2.17k
  return decmp_pos;
1927
2.17k
}
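As a small worked example of the offset calculation above: for n = 2 the skip loop walks past collections 0 and 1, adding up the size reported by cmp_col_get_size() for each of them, so the returned offset is where collection 2 begins in the decompressed output; the caller then applies the same offset to the model and updated-model buffers.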
1928
1929
1930
/**
1931
 * @brief decompress a compression entity
1932
 *
1933
 * @note this function assumes that the entity size in the ent header is correct
1934
 * @param ent     pointer to the compression entity to be decompressed
1935
 * @param model_of_data   pointer to model data buffer (can be NULL if no
1936
 *        model compression mode is used)
1937
 * @param up_model_buf    pointer to store the updated model for the next model
1938
 *        mode compression (can be the same as the model_of_data
1939
 *        buffer for an in-place update or NULL if the updated model is not needed)
1940
 * @param decompressed_data pointer to the decompressed data buffer (can be NULL)
1941
 *
1942
 * @returns the size of the decompressed data on success; returns negative on failure
1943
 */
1944
1945
int decompress_cmp_entiy(const struct cmp_entity *ent, const void *model_of_data,
1946
       void *up_model_buf, void *decompressed_data)
1947
1.26k
{
1948
1.26k
  struct cmp_cfg cfg;
1949
1.26k
  int decmp_size;
1950
1.26k
  int i, n_chunks;
1951
1952
1.26k
  memset(&cfg, 0, sizeof(struct cmp_cfg));
1953
1954
1.26k
  if (!ent)
1955
10
    return -1;
1956
1957
1.25k
  decmp_size = (int)cmp_ent_get_original_size(ent);
1958
1.25k
  if (decmp_size < 0)
1959
0
    return -1;
1960
1.25k
  if (decmp_size == 0)
1961
3
    return 0;
1962
1963
1.24k
  if (cmp_ent_read_header(ent, &cfg))
1964
19
    return -1;
1965
1966
1.22k
  if (cfg.data_type != DATA_TYPE_CHUNK) { /* perform a non-chunk decompression */
1967
251
    if (cfg.cmp_mode == CMP_MODE_RAW) {
1968
19
      uint32_t data_size = cfg.samples * sizeof(uint16_t);
1969
1970
19
      if (decompressed_data) {
1971
9
        memcpy(decompressed_data, cmp_ent_get_data_buf_const(ent), data_size);
1972
9
        if (cmp_input_big_to_cpu_endianness(decompressed_data, data_size, cfg.data_type))
1973
0
          return -1;
1974
9
      }
1975
19
      return (int)data_size;
1976
19
    }
1977
1978
232
    cfg.model_buf = model_of_data;
1979
232
    cfg.updated_model_buf = up_model_buf;
1980
232
    cfg.dst = decompressed_data;
1981
1982
232
    return decompressed_data_internal(&cfg, RDCU_DECOMPRESSION);
1983
251
  }
1984
1985
  /* perform a chunk decompression */
1986
1987
977
  if (cfg.cmp_mode == CMP_MODE_RAW) {
1988
115
    if (decompressed_data) {
1989
57
      memcpy(decompressed_data, cfg.src, cfg.stream_size);
1990
57
      be_to_cpu_chunk(decompressed_data, cfg.stream_size);
1991
57
    }
1992
115
    return (int)cfg.stream_size;
1993
115
  }
1994
1995
862
  n_chunks = get_num_of_chunks(ent);
1996
862
  if (n_chunks <= 0)
1997
2
    return -1;
1998
1999
2.81k
  for (i = 0; i < n_chunks; i++) {
2000
2.19k
    int decmp_chunk_size;
2001
2.19k
    int col_uncompressed;
2002
2.19k
    struct cmp_cfg cmp_cpy = cfg;
2003
2.19k
    long offset = parse_cmp_collection(cmp_ent_get_data_buf_const(ent), i,
2004
2.19k
               &cmp_cpy, &col_uncompressed, decmp_size);
2005
2.19k
    if (offset < 0)
2006
20
      return -1;
2007
2008
2.17k
    if (decompressed_data)
2009
855
      cmp_cpy.dst = (uint8_t *)decompressed_data + offset;
2010
2.17k
    if (model_of_data)
2011
1.26k
      cmp_cpy.model_buf = (const uint8_t *)model_of_data + offset;
2012
2.17k
    if (up_model_buf)
2013
402
      cmp_cpy.updated_model_buf = (uint8_t *)up_model_buf + offset;
2014
2015
2.17k
    if (col_uncompressed) {
2016
1.16k
      if (cmp_cpy.updated_model_buf && model_mode_is_used(cmp_cpy.cmp_mode)) {
2017
278
        uint32_t s = cmp_cpy.stream_size;
2018
278
        memcpy(cmp_cpy.updated_model_buf, cmp_cpy.src, s);
2019
278
        if (be_to_cpu_chunk(cmp_cpy.updated_model_buf, s))
2020
0
          return -1;
2021
278
      }
2022
1.16k
      cmp_cpy.cmp_mode = CMP_MODE_RAW;
2023
1.16k
    }
2024
2025
2.17k
    decmp_chunk_size = decompressed_data_internal(&cmp_cpy, ICU_DECOMRESSION);
2026
2.17k
    if (decmp_chunk_size < 0)
2027
222
      return decmp_chunk_size;
2028
2.17k
  }
2029
618
  return decmp_size;
2030
860
}
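A minimal usage sketch for decompress_cmp_entiy(); the helper name, the include path and the error handling are illustrative assumptions and not part of this library. It relies on the size-query behaviour (a NULL destination returns the decompressed size) and the documented in-place model update (passing the model buffer as both model_of_data and up_model_buf):

#include <stdlib.h>
#include "decmp.h" /* assumed include path for the public decompression API */

/* Hypothetical helper: decompress a compression entity into a freshly
 * allocated buffer; pass model_of_data as NULL if no model mode was used. */
static void *decompress_entity_to_new_buffer(const struct cmp_entity *ent,
					     void *model_of_data, int *size)
{
	void *buf;

	/* a NULL destination only queries the decompressed size */
	*size = decompress_cmp_entiy(ent, model_of_data, NULL, NULL);
	if (*size <= 0)
		return NULL;

	buf = malloc((size_t)*size);
	if (!buf)
		return NULL;

	/* decompress for real and update the model in place for the next run */
	if (decompress_cmp_entiy(ent, model_of_data, model_of_data, buf) < 0) {
		free(buf);
		return NULL;
	}
	return buf;
}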
2031
2032
2033
/**
2034
 * @brief decompress RDCU compressed data without a compression entity header
2035
 *
2036
 * @param compressed_data pointer to the RDCU compressed data (without a
2037
 *        compression entity header)
2038
 * @param info      pointer to a decompression information structure
2039
 *        consisting of the metadata of the compression
2040
 * @param model_of_data   pointer to model data buffer (can be NULL if no
2041
 *        model compression mode is used)
2042
 * @param up_model_buf    pointer to store the updated model for the next model
2043
 *        mode compression (can be the same as the model_of_data
2044
 *        buffer for an in-place update or NULL if the
2045
 *        updated model is not needed)
2046
 * @param decompressed_data pointer to the decompressed data buffer (can be NULL)
2047
 *
2048
 * @returns the size of the decompressed data on success; returns negative on failure
2049
 */
2050
2051
int decompress_rdcu_data(const uint32_t *compressed_data, const struct cmp_info *info,
2052
       const uint16_t *model_of_data, uint16_t *up_model_buf,
2053
       uint16_t *decompressed_data)
2054
2055
0
{
2056
0
  struct cmp_cfg cfg;
2057
2058
0
  if (!compressed_data)
2059
0
    return -1;
2060
2061
0
  if (!info)
2062
0
    return -1;
2063
2064
0
  if (info->cmp_err)
2065
0
    return -1;
2066
2067
0
  memset(&cfg, 0, sizeof(struct cmp_cfg));
2068
2069
0
  cfg.data_type = DATA_TYPE_IMAGETTE;
2070
0
  cfg.model_buf = model_of_data;
2071
0
  cfg.updated_model_buf = up_model_buf;
2072
0
  cfg.dst = decompressed_data;
2073
2074
0
  cfg.cmp_mode = info->cmp_mode_used;
2075
0
  cfg.model_value = info->model_value_used;
2076
0
  cfg.round = info->round_used;
2077
0
  cfg.spill_imagette = info->spill_used;
2078
0
  cfg.cmp_par_imagette = info->golomb_par_used;
2079
0
  cfg.samples = info->samples_used;
2080
0
  cfg.src = compressed_data;
2081
0
  cfg.stream_size = (info->cmp_size+7)/8;
2082
2083
0
  return decompressed_data_internal(&cfg, RDCU_DECOMPRESSION);
2084
0
}
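A similar sketch for the RDCU path, with the same caveats (illustrative helper name, assumed include path). Here the destination can be sized directly from the compression metadata, since an imagette sample is 16 bits wide:

#include <stdint.h>
#include <stdlib.h>
#include "decmp.h" /* assumed include path for decompress_rdcu_data() and struct cmp_info */

/* Hypothetical wrapper: decompress RDCU data into a new buffer and update
 * the model in place for the next model-mode compression. */
static uint16_t *decompress_rdcu_to_new_buffer(const uint32_t *compressed_data,
					       const struct cmp_info *info,
					       uint16_t *model)
{
	/* the decompressed size is samples_used 16-bit samples */
	uint16_t *data = malloc(info->samples_used * sizeof(*data));

	if (data &&
	    decompress_rdcu_data(compressed_data, info, model, model, data) < 0) {
		free(data);
		data = NULL; /* metadata and bitstream are inconsistent */
	}
	return data;
}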