Coverage Report

Created: 2025-06-15 00:57

/src/cmp_tool/lib/icu_compress/cmp_icu.c
Line
Count
Source
1
/**
2
 * @file   cmp_icu.c
3
 * @author Dominik Loidolt (dominik.loidolt@univie.ac.at)
4
 * @date   2020
5
 *
6
 * @copyright GPLv2
7
 * This program is free software; you can redistribute it and/or modify it
8
 * under the terms and conditions of the GNU General Public License,
9
 * version 2, as published by the Free Software Foundation.
10
 *
11
 * This program is distributed in the hope it will be useful, but WITHOUT
12
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14
 * more details.
15
 *
16
 * @brief software compression library
17
 * @see Data Compression User Manual PLATO-UVIE-PL-UM-0001
18
 */
19
20
21
#include <stdint.h>
22
#include <string.h>
23
#include <limits.h>
24
25
#include "../common/byteorder.h"
26
#include "../common/compiler.h"
27
#include "../common/cmp_debug.h"
28
#include "../common/cmp_data_types.h"
29
#include "../common/cmp_support.h"
30
#include "../common/cmp_cal_up_model.h"
31
#include "../common/cmp_max_used_bits.h"
32
#include "../common/cmp_entity.h"
33
#include "../common/cmp_error.h"
34
#include "../common/cmp_error_list.h"
35
#include "../common/leon_inttypes.h"
36
#include "cmp_chunk_type.h"
37
38
#include "../cmp_icu.h"
39
#include "../cmp_chunk.h"
40
41
42
/**
43
 * @brief default implementation of the get_timestamp() function
44
 *
45
 * @returns 0
46
 */
47
48
static uint64_t default_get_timestamp(void)
49
7.72k
{
50
7.72k
  return 0;
51
7.72k
}
52
53
54
/**
55
 * @brief function pointer to a function returning the current PLATO timestamp;
56
 *  initialised by the compress_chunk_init() function
57
 */
58
59
static uint64_t (*get_timestamp)(void) = default_get_timestamp;
60
61
62
/**
63
 * @brief holds the version_identifier for the compression header;
64
 *  initialised by the compress_chunk_init() function
65
 */
66
67
static uint32_t version_identifier;
68
69
70
/**
71
 * @brief structure to hold a setup to encode a value
72
 */
73
74
struct encoder_setup {
75
  uint32_t (*generate_cw_f)(uint32_t value, uint32_t encoder_par1,
76
          uint32_t encoder_par2, uint32_t *cw); /**< function pointer to a code word encoder */
77
  uint32_t (*encode_method_f)(uint32_t data, uint32_t model, uint32_t stream_len,
78
            const struct encoder_setup *setup); /**< pointer to the encoding function */
79
  uint32_t *bitstream_adr; /**< start address of the compressed data bitstream */
80
  uint32_t max_stream_len; /**< maximum length of the bitstream in bits */
81
  uint32_t encoder_par1;   /**< encoding parameter 1 */
82
  uint32_t encoder_par2;   /**< encoding parameter 2 */
83
  uint32_t spillover_par;  /**< outlier parameter */
84
  uint32_t lossy_par;      /**< lossy compression parameter */
85
  uint32_t max_data_bits;  /**< how many bits are needed to represent the highest possible value */
86
};
87
88
89
/**
90
 * @brief map a signed value into a positive value range
91
 *
92
 * @param value_to_map  signed value to map
93
 * @param max_data_bits how many bits are needed to represent the
94
 *      highest possible value
95
 *
96
 * @returns the positive mapped value
97
 */
98
99
static uint32_t map_to_pos(uint32_t value_to_map, unsigned int max_data_bits)
100
585k
{
101
585k
  uint32_t const mask = (~0U >> (32 - max_data_bits)); /* mask the used bits */
102
585k
  uint32_t result;
103
104
585k
  value_to_map &= mask;
105
585k
  if (value_to_map >> (max_data_bits - 1)) { /* check the leading sign bit */
106
226k
    value_to_map |= ~mask; /* convert to 32-bit signed integer */
107
    /* map negative values to uneven numbers */
108
226k
    result = (-value_to_map) * 2 - 1; /* possible integer overflow is intended */
109
359k
  } else {
110
    /* map positive values to even numbers */
111
359k
    result = value_to_map * 2; /* possible integer overflow is intended */
112
359k
  }
113
114
585k
  return result;
115
585k
}
116
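The mapping above is a zig-zag scheme: positive differences become even numbers and negative differences become odd numbers. A minimal standalone sketch of the same mapping for 16-bit samples (illustrative only, not part of the measured source):

#include <stdint.h>
#include <stdio.h>

/* zig-zag mapping sketch for 16-bit samples: 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ... */
static uint32_t map_to_pos16(int16_t v)
{
	if (v < 0)
		return (uint32_t)(-(int32_t)v) * 2 - 1; /* negatives -> odd numbers */
	return (uint32_t)v * 2; /* positives -> even numbers */
}

int main(void)
{
	printf("%u %u %u %u\n", map_to_pos16(0), map_to_pos16(-1),
	       map_to_pos16(1), map_to_pos16(-2)); /* prints: 0 1 2 3 */
	return 0;
}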
117
118
/**
119
 * @brief put a value of up to 32 bits into a big-endian bitstream
120
 *
121
 * @param value     the value to put into the bitstream
122
 * @param n_bits    number of bits to put into the bitstream
123
 * @param bit_offset    bit index where the bits will be put, seen from
124
 *        the very beginning of the bitstream
125
 * @param bitstream_adr   this is the pointer to the beginning of the
126
 *        bitstream (can be NULL)
127
 * @param max_stream_len  maximum length of the bitstream in *bits*; is
128
 *        ignored if bitstream_adr is NULL
129
 *
130
 * @returns the length of the generated bitstream in bits on success or an error
131
 *          code (which can be tested with cmp_is_error()) in the event of an
132
 *          incorrect input or if the bitstream buffer is too small to put the
133
 *          value in the bitstream.
134
 */
135
136
static uint32_t put_n_bits32(uint32_t value, unsigned int n_bits, uint32_t bit_offset,
137
           uint32_t *bitstream_adr, unsigned int max_stream_len)
138
903k
{
139
  /*
140
   *                               UNSEGMENTED
141
   * |-----------|XXXXXX|---------------|--------------------------------|
142
   * |-bits_left-|n_bits|-------------------bits_right-------------------|
143
   * ^
144
   * local_adr
145
   *                               SEGMENTED
146
   * |-----------------------------|XXX|XXX|-----------------------------|
147
   * |----------bits_left----------|n_bits-|---------bits_right----------|
148
   */
149
903k
  uint32_t const bits_left = bit_offset & 0x1F;
150
903k
  uint32_t const bits_right = 64 - bits_left - n_bits;
151
903k
  uint32_t const shift_left = 32 - n_bits;
152
903k
  uint32_t const stream_len = n_bits + bit_offset; /* no check for overflow */
153
903k
  uint32_t *local_adr;
154
903k
  uint32_t mask, tmp;
155
156
  /* Leave in case of erroneous input */
157
903k
  RETURN_ERROR_IF((int)shift_left < 0, INT_DECODER, "cannot insert more than 32 bits into the bit stream");  /* check n_bits <= 32 */
158
159
903k
  if (n_bits == 0)
160
0
    return stream_len;
161
162
903k
  if (!bitstream_adr)  /* Do we need to write data to the bitstream? */
163
537k
    return stream_len;
164
165
  /* Check if the bitstream buffer is large enough */
166
366k
  if (stream_len > max_stream_len)
167
24.3k
    return CMP_ERROR(SMALL_BUFFER);
168
169
342k
  local_adr = bitstream_adr + (bit_offset >> 5);
170
171
  /* clear the destination with inverse mask */
172
342k
  mask = (0XFFFFFFFFU << shift_left) >> bits_left;
173
342k
  tmp = be32_to_cpu(*local_adr) & ~mask;
174
175
  /* put (the first part of) the value into the bitstream */
176
342k
  tmp |= (value << shift_left) >> bits_left;
177
342k
  *local_adr = cpu_to_be32(tmp);
178
179
  /* Do we need to split the value over two words (SEGMENTED case) */
180
342k
  if (bits_right < 32) {
181
157k
    local_adr++;  /* adjust address */
182
183
    /* clear the destination */
184
157k
    mask = 0XFFFFFFFFU << bits_right;
185
157k
    tmp = be32_to_cpu(*local_adr) & ~mask;
186
187
    /* put the 2nd part of the value into the bitstream */
188
157k
    tmp |= value << bits_right;
189
157k
    *local_adr = cpu_to_be32(tmp);
190
157k
  }
191
342k
  return stream_len;
192
366k
}
193
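Worked example of the SEGMENTED case: putting the 6-bit value 0b101101 at bit_offset 30 gives bits_left = 30 and bits_right = 64 - 30 - 6 = 28, which is less than 32, so the two most significant bits of the value (0b10) land in the two least significant bits of the first 32-bit word and the remaining four bits (0b1101) land in the most significant bits of the following word; the function returns the new stream length 30 + 6 = 36.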
194
195
/**
196
 * @brief forms the codeword according to the Rice code
197
 *
198
 * @param value   value to be encoded (must be less than or equal to cmp_ima_max_spill(m))
199
 * @param m   Golomb parameter; only values of m that are a power of 2 are allowed,
200
 *      the maximum allowed Golomb parameter is 0x80000000
201
 * @param log2_m  Rice parameter; equals ilog_2(m), calculated outside the function
202
 *      for better performance
203
 * @param cw    address where the code word is stored
204
 *
205
 * @warning there is no check of the validity of the input parameters!
206
 * @returns the length of the formed code word in bits; the code word is invalid
207
 *  if the return value is greater than 32
208
 */
209
210
static uint32_t rice_encoder(uint32_t value, uint32_t m, uint32_t log2_m,
211
           uint32_t *cw)
212
159k
{
213
159k
  uint32_t const q = value >> log2_m;  /* quotient of value/m */
214
159k
  uint32_t const qc = (1U << q) - 1;   /* quotient code without ending zero */
215
216
159k
  uint32_t const r = value & (m-1);    /* remainder of value/m */
217
159k
  uint32_t const rl = log2_m + 1;      /* length of the remainder (+1 for the 0 in the quotient code) */
218
219
159k
  *cw = (qc << (rl & 0x1FU)) | r; /* put the quotient and remainder code together */
220
  /*
221
   * NOTE: If log2_m = 31 -> rl = 32, (q << rl) leads to an undefined
222
   * behavior. However, in this case, a valid code with a maximum of 32
223
   * bits can only be formed if q = 0 and qc = 0. To prevent undefined
224
   * behavior, the right shift operand is masked (& 0x1FU)
225
   */
226
227
159k
  return rl + q;  /* calculate the length of the code word */
228
159k
}
229
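To illustrate the codeword layout (unary quotient, a terminating zero, then log2_m remainder bits), a small standalone sketch, assuming the same convention m = 1 << log2_m and without the guards discussed in the NOTE above:

#include <stdint.h>
#include <stdio.h>

/* Rice codeword sketch: q ones, a zero, then the log2_m-bit remainder */
static uint32_t rice_sketch(uint32_t value, uint32_t log2_m, uint32_t *cw)
{
	uint32_t const q = value >> log2_m;               /* quotient */
	uint32_t const r = value & ((1U << log2_m) - 1U); /* remainder */

	*cw = (((1U << q) - 1U) << (log2_m + 1)) | r;
	return q + log2_m + 1; /* codeword length in bits */
}

int main(void)
{
	uint32_t cw;
	uint32_t const len = rice_sketch(9, 2, &cw); /* value 9 with m = 4 */

	printf("len=%u cw=0x%X\n", len, cw); /* prints: len=5 cw=0x19 (0b11001) */
	return 0;
}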
230
231
/**
232
 * @brief forms a codeword according to the Golomb code
233
 *
234
 * @param value   value to be encoded (must be less than or equal to cmp_ima_max_spill(m))
235
 * @param m   Golomb parameter (must be greater than 0)
236
 * @param log2_m  is ilog_2(m), calculated outside the function for better performance
237
 * @param cw    address where the code word is stored
238
 *
239
 * @warning there is no check of the validity of the input parameters!
240
 * @returns the length of the formed code word in bits; the code word is invalid
241
 *  if the return value is greater than 32
242
 */
243
244
static uint32_t golomb_encoder(uint32_t value, uint32_t m, uint32_t log2_m,
245
             uint32_t *cw)
246
426k
{
247
426k
  uint32_t len = log2_m + 1;  /* codeword length in group 0 */
248
426k
  uint32_t const cutoff = (0x2U << log2_m) - m;  /* members in group 0 */
249
250
426k
  if (value < cutoff) {  /* group 0 */
251
282k
    *cw = value;
252
282k
  } else {  /* other groups */
253
143k
    uint32_t const reg_mask = 0x1FU;  /* mask for the right shift operand to prevent undefined behavior */
254
143k
    uint32_t const g = (value-cutoff) / m;  /* group number of same cw length */
255
143k
    uint32_t const r = (value-cutoff) - g * m; /* member in the group */
256
143k
    uint32_t const gc = (1U << (g & reg_mask)) - 1; /* prepare the left side in unary */
257
143k
    uint32_t const b = cutoff << 1;         /* form the base codeword */
258
259
143k
    *cw = gc << ((len+1) & reg_mask);  /* composed codeword part 1 */
260
143k
    *cw += b + r;                      /* composed codeword part 2 */
261
143k
    len += 1 + g;                      /* length of the codeword */
262
143k
  }
263
426k
  return len;
264
426k
}
265
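Worked example with m = 3 (log2_m = 1): the cutoff is (0x2 << 1) - 3 = 1, so value 0 falls into group 0 and is coded in 2 bits as 0b00, while value 5 gives g = (5 - 1) / 3 = 1, r = 1 and b = 2, yielding the codeword (0b1 << 3) + 2 + 1 = 0b1011 of length 2 + 1 + 1 = 4 bits, i.e. the classic Golomb code for 5 with m = 3 (unary '10' followed by the truncated-binary remainder '11').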
266
267
/**
268
 * @brief generate a code word without an outlier mechanism and put it in the
269
 *  bitstream
270
 *
271
 * @param value   value to encode in the bitstream
272
 * @param stream_len  length of the bitstream in bits
273
 * @param setup   pointer to the encoder setup
274
 *
275
 * @returns the bit length of the bitstream on success or an error code if it
276
 *  fails (which can be tested with cmp_is_error())
277
 */
278
279
static uint32_t encode_normal(uint32_t value, uint32_t stream_len,
280
            const struct encoder_setup *setup)
281
585k
{
282
585k
  uint32_t code_word, cw_len;
283
284
585k
  cw_len = setup->generate_cw_f(value, setup->encoder_par1,
285
585k
              setup->encoder_par2, &code_word);
286
287
585k
  return put_n_bits32(code_word, cw_len, stream_len, setup->bitstream_adr,
288
585k
          setup->max_stream_len);
289
585k
}
290
291
292
/**
293
 * @brief subtracts the model from the data, encodes the result and puts it into the
294
 *  bitstream; outliers are encoded with the zero escape symbol mechanism
295
 *
296
 * @param data    data to encode
297
 * @param model   model of the data (0 if not used)
298
 * @param stream_len  length of the bitstream in bits
299
 * @param setup   pointer to the encoder setup
300
 *
301
 * @returns the bit length of the bitstream on success or an error code if it
302
 *  fails (which can be tested with cmp_is_error())
303
 *
304
 * @note no check if the data or model are in the allowed range
305
 * @note no check if the setup->spillover_par is in the allowed range
306
 */
307
308
static uint32_t encode_value_zero(uint32_t data, uint32_t model, uint32_t stream_len,
309
          const struct encoder_setup *setup)
310
385k
{
311
385k
  data -= model; /* possible underflow is intended */
312
313
385k
  data = map_to_pos(data, setup->max_data_bits);
314
315
  /* For performance reasons, we check to see if there is an outlier
316
   * before adding one, rather than the other way around:
317
   * data++;
318
   * if (data < setup->spillover_par && data != 0)
319
   *  return ...
320
   */
321
385k
  if (data < (setup->spillover_par - 1)) { /* detect non-outlier */
322
175k
    data++; /* add 1 to every value so we can use 0 as the escape symbol */
323
175k
    return encode_normal(data, stream_len, setup);
324
175k
  }
325
326
210k
  data++; /* add 1 to every value so we can use 0 as the escape symbol */
327
328
  /* use zero as escape symbol */
329
210k
  stream_len = encode_normal(0, stream_len, setup);
330
210k
  if (cmp_is_error(stream_len))
331
1.73k
    return stream_len;
332
333
  /* put the data unencoded in the bitstream */
334
208k
  stream_len = put_n_bits32(data, setup->max_data_bits, stream_len,
335
208k
          setup->bitstream_adr, setup->max_stream_len);
336
208k
  return stream_len;
337
210k
}
338
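Concrete example with spillover_par = 8 and max_data_bits = 16: a mapped value of 3 lies below the threshold and is sent as the codeword for 4 (the +1 keeps 0 free as the escape symbol), whereas a mapped value of 42 is sent as the codeword for the escape symbol 0 followed by 43 as a raw 16-bit word.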
339
340
/**
341
 * @brief subtracts the model from the data, encodes the result and puts it into the
342
 *  bitstream; outliers are encoded with the multi escape symbol mechanism
343
 *
344
 * @param data    data to encode
345
 * @param model   model of the data (0 if not used)
346
 * @param stream_len  length of the bitstream in bits
347
 * @param setup   pointer to the encoder setup
348
 *
349
 * @returns the bit length of the bitstream on success or an error code if it
350
 *  fails (which can be tested with cmp_is_error())
351
 *
352
 * @note no check if the data or model are in the allowed range
353
 * @note no check if the setup->spillover_par is in the allowed range
354
 */
355
356
static uint32_t encode_value_multi(uint32_t data, uint32_t model, uint32_t stream_len,
357
           const struct encoder_setup *setup)
358
200k
{
359
200k
  uint32_t unencoded_data;
360
200k
  unsigned int unencoded_data_len;
361
200k
  uint32_t escape_sym, escape_sym_offset;
362
363
200k
  data -= model; /* possible underflow is intended */
364
365
200k
  data = map_to_pos(data, setup->max_data_bits);
366
367
200k
  if (data < setup->spillover_par) /* detect non-outlier */
368
91.5k
    return  encode_normal(data, stream_len, setup);
369
370
  /*
371
   * In this mode we put the difference between the data and the spillover
372
   * threshold value (unencoded_data) after an encoded escape symbol, which
373
   * indicates that the next codeword is unencoded.
374
   * We use different escape symbols depending on the number of bits needed
375
   * for the unencoded data:
376
   * 0, 1, 2 bits needed for unencoded data -> escape symbol is spillover_par + 0
377
   * 3, 4 bits needed for unencoded data -> escape symbol is spillover_par + 1
378
   * 5, 6 bits needed for unencoded data -> escape symbol is spillover_par + 2
379
   * and so on
380
   */
381
108k
  unencoded_data = data - setup->spillover_par;
382
383
108k
  if (!unencoded_data) /* catch __builtin_clz(0) because the result is undefined.*/
384
883
    escape_sym_offset = 0;
385
107k
  else
386
107k
    escape_sym_offset = (31U - (uint32_t)__builtin_clz(unencoded_data)) >> 1;
387
388
108k
  escape_sym = setup->spillover_par + escape_sym_offset;
389
108k
  unencoded_data_len = (escape_sym_offset + 1U) << 1;
390
391
  /* put the escape symbol in the bitstream */
392
108k
  stream_len = encode_normal(escape_sym, stream_len, setup);
393
108k
  if (cmp_is_error(stream_len))
394
6.75k
    return stream_len;
395
396
  /* put the unencoded data in the bitstream */
397
101k
  stream_len = put_n_bits32(unencoded_data, unencoded_data_len, stream_len,
398
101k
          setup->bitstream_adr, setup->max_stream_len);
399
101k
  return stream_len;
400
108k
}
401
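Concrete example with spillover_par = 16: a mapped value of 37 gives unencoded_data = 21, which needs 5 bits, so escape_sym_offset = (31 - __builtin_clz(21)) >> 1 = 2; the encoder emits the codeword for the escape symbol 16 + 2 = 18 followed by 21 as (2 + 1) << 1 = 6 raw bits.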
402
403
/**
404
 * @brief encodes the data with the model and the given setup and puts it into
405
 *  the bitstream
406
 *
407
 * @param data    data to encode
408
 * @param model   model of the data (0 if not used)
409
 * @param stream_len  length of the bitstream in bits
410
 * @param setup   pointer to the encoder setup
411
 *
412
 * @returns the bit length of the bitstream on success or an error code if it
413
 *  fails (which can be tested with cmp_is_error())
414
 */
415
416
static uint32_t encode_value(uint32_t data, uint32_t model, uint32_t stream_len,
417
           const struct encoder_setup *setup)
418
585k
{
419
585k
  uint32_t const mask = ~(0xFFFFFFFFU >> (32-setup->max_data_bits));
420
421
  /* lossy rounding of the data if lossy_par > 0 */
422
585k
  data = round_fwd(data, setup->lossy_par);
423
585k
  model = round_fwd(model, setup->lossy_par);
424
425
585k
  RETURN_ERROR_IF(data & mask || model & mask, DATA_VALUE_TOO_LARGE, "");
426
427
585k
  return setup->encode_method_f(data, model, stream_len, setup);
428
585k
}
429
430
431
/**
432
 * @brief calculate the maximum length of the bitstream in bits
433
 * @note we round down to the next 4-byte aligned address because we access the
434
 *  cmp_buffer in uint32_t words
435
 *
436
 * @param stream_size size of the bitstream in bytes
437
 *
438
 * @returns buffer size in bits
439
 */
440
441
static uint32_t cmp_stream_size_to_bits(uint32_t stream_size)
442
239k
{
443
239k
  return (stream_size & ~0x3U) * 8;
444
239k
}
445
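For example, a stream_size of 10 bytes is rounded down to the 4-byte aligned size of 8 bytes and reported as 64 bits.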
446
447
/**
448
 * @brief configure an encoder setup structure for encoding a value
449
 *
450
 * @param setup   pointer to the encoder setup
451
 * @param cmp_par compression parameter
452
 * @param spillover spillover (outlier) parameter
453
 * @param lossy_par lossy compression parameter
454
 * @param max_data_bits how many bits are needed to represent the highest possible value
455
 * @param cfg   pointer to the compression configuration structure
456
 *
457
 * @warning input parameters are not checked for validity
458
 */
459
460
static void configure_encoder_setup(struct encoder_setup *setup,
461
            uint32_t cmp_par, uint32_t spillover,
462
            uint32_t lossy_par, uint32_t max_data_bits,
463
            const struct cmp_cfg *cfg)
464
176k
{
465
176k
  memset(setup, 0, sizeof(struct encoder_setup));
466
467
176k
  setup->encoder_par1 = cmp_par;
468
176k
  setup->max_data_bits = max_data_bits;
469
176k
  setup->lossy_par = lossy_par;
470
176k
  setup->bitstream_adr = cfg->dst;
471
176k
  setup->max_stream_len = cmp_stream_size_to_bits(cfg->stream_size);
472
176k
  setup->encoder_par2 = ilog_2(cmp_par);
473
176k
  setup->spillover_par = spillover;
474
475
  /* if encoder_par1 is a power of two, we can use the faster rice_encoder */
476
176k
  if (is_a_pow_of_2(setup->encoder_par1))
477
46.6k
    setup->generate_cw_f = &rice_encoder;
478
129k
  else
479
129k
    setup->generate_cw_f = &golomb_encoder;
480
481
  /* CMP_MODE_RAW is already handled before */
482
176k
  if (cfg->cmp_mode == CMP_MODE_MODEL_ZERO ||
483
176k
      cfg->cmp_mode == CMP_MODE_DIFF_ZERO)
484
108k
    setup->encode_method_f = &encode_value_zero;
485
67.2k
  else
486
67.2k
    setup->encode_method_f = &encode_value_multi;
487
176k
}
488
489
490
/**
491
 * @brief compress imagette data
492
 *
493
 * @param cfg   pointer to the compression configuration structure
494
 * @param stream_len  already used length of the bitstream in bits
495
 *
496
 * @returns the bit length of the bitstream on success or an error code if it
497
 *  fails (which can be tested with cmp_is_error())
498
 */
499
500
static uint32_t compress_imagette(const struct cmp_cfg *cfg, uint32_t stream_len)
501
25.3k
{
502
25.3k
  size_t i;
503
25.3k
  struct encoder_setup setup;
504
25.3k
  uint32_t max_data_bits;
505
506
25.3k
  const uint16_t *data_buf = cfg->src;
507
25.3k
  const uint16_t *model_buf = cfg->model_buf;
508
25.3k
  uint16_t model = 0;
509
25.3k
  const uint16_t *next_model_p = data_buf;
510
25.3k
  uint16_t *up_model_buf = NULL;
511
512
25.3k
  if (model_mode_is_used(cfg->cmp_mode)) {
513
19.0k
    model = get_unaligned(&model_buf[0]);
514
19.0k
    next_model_p = &model_buf[1];
515
19.0k
    up_model_buf = cfg->updated_model_buf;
516
19.0k
  }
517
518
25.3k
  if (cfg->data_type == DATA_TYPE_F_CAM_IMAGETTE ||
519
25.3k
      cfg->data_type == DATA_TYPE_F_CAM_IMAGETTE_ADAPTIVE) {
520
891
    max_data_bits = MAX_USED_BITS.fc_imagette;
521
24.4k
  } else if (cfg->data_type == DATA_TYPE_SAT_IMAGETTE ||
522
24.4k
       cfg->data_type == DATA_TYPE_SAT_IMAGETTE_ADAPTIVE) {
523
19.9k
    max_data_bits = MAX_USED_BITS.saturated_imagette;
524
19.9k
  } else { /* DATA_TYPE_IMAGETTE, DATA_TYPE_IMAGETTE_ADAPTIVE */
525
4.49k
    max_data_bits = MAX_USED_BITS.nc_imagette;
526
4.49k
  }
527
528
25.3k
  configure_encoder_setup(&setup, cfg->cmp_par_imagette,
529
25.3k
        cfg->spill_imagette, cfg->round, max_data_bits, cfg);
530
531
150k
  for (i = 0;; i++) {
532
150k
    stream_len = encode_value(get_unaligned(&data_buf[i]),
533
150k
            model, stream_len, &setup);
534
150k
    if (cmp_is_error(stream_len))
535
12.7k
      break;
536
537
137k
    if (up_model_buf) {
538
56.7k
      uint16_t data = get_unaligned(&data_buf[i]);
539
56.7k
      up_model_buf[i] = cmp_up_model(data, model, cfg->model_value,
540
56.7k
                   setup.lossy_par);
541
56.7k
    }
542
137k
    if (i >= cfg->samples-1)
543
12.5k
      break;
544
545
125k
    model = get_unaligned(&next_model_p[i]);
546
125k
  }
547
25.3k
  return stream_len;
548
25.3k
}
549
550
551
/**
552
 * @brief compress short normal light flux (S_FX) data
553
 *
554
 * @param cfg   pointer to the compression configuration structure
555
 * @param stream_len  already used length of the bitstream in bits
556
 *
557
 * @returns the bit length of the bitstream on success or an error code if it
558
 *  fails (which can be tested with cmp_is_error())
559
 */
560
561
static uint32_t compress_s_fx(const struct cmp_cfg *cfg, uint32_t stream_len)
562
2.51k
{
563
2.51k
  size_t i;
564
565
2.51k
  const struct s_fx *data_buf = cfg->src;
566
2.51k
  const struct s_fx *model_buf = cfg->model_buf;
567
2.51k
  struct s_fx *up_model_buf = NULL;
568
2.51k
  const struct s_fx *next_model_p;
569
2.51k
  struct s_fx model;
570
2.51k
  struct encoder_setup setup_exp_flag, setup_fx;
571
572
2.51k
  if (model_mode_is_used(cfg->cmp_mode)) {
573
905
    model = model_buf[0];
574
905
    next_model_p = &model_buf[1];
575
905
    up_model_buf = cfg->updated_model_buf;
576
1.61k
  } else {
577
1.61k
    memset(&model, 0, sizeof(model));
578
1.61k
    next_model_p = data_buf;
579
1.61k
  }
580
581
2.51k
  configure_encoder_setup(&setup_exp_flag, cfg->cmp_par_exp_flags, cfg->spill_exp_flags,
582
2.51k
        cfg->round, MAX_USED_BITS.s_exp_flags, cfg);
583
2.51k
  configure_encoder_setup(&setup_fx, cfg->cmp_par_fx, cfg->spill_fx,
584
2.51k
        cfg->round, MAX_USED_BITS.s_fx, cfg);
585
586
11.2k
  for (i = 0;; i++) {
587
11.2k
    stream_len = encode_value(data_buf[i].exp_flags, model.exp_flags,
588
11.2k
            stream_len, &setup_exp_flag);
589
11.2k
    if (cmp_is_error(stream_len))
590
285
      break;
591
10.9k
    stream_len = encode_value(data_buf[i].fx, model.fx, stream_len,
592
10.9k
            &setup_fx);
593
10.9k
    if (cmp_is_error(stream_len))
594
568
      break;
595
596
10.3k
    if (up_model_buf) {
597
2.49k
      up_model_buf[i].exp_flags = cmp_up_model(data_buf[i].exp_flags, model.exp_flags,
598
2.49k
                 cfg->model_value, setup_exp_flag.lossy_par);
599
2.49k
      up_model_buf[i].fx = cmp_up_model(data_buf[i].fx, model.fx,
600
2.49k
                cfg->model_value, setup_fx.lossy_par);
601
2.49k
    }
602
603
10.3k
    if (i >= cfg->samples-1)
604
1.66k
      break;
605
606
8.71k
    model = next_model_p[i];
607
8.71k
  }
608
2.51k
  return stream_len;
609
2.51k
}
610
611
612
/**
613
 * @brief compress S_FX_EFX data
614
 *
615
 * @param cfg   pointer to the compression configuration structure
616
 * @param stream_len  already used length of the bitstream in bits
617
 *
618
 * @returns the bit length of the bitstream on success or an error code if it
619
 *  fails (which can be tested with cmp_is_error())
620
 */
621
622
static uint32_t compress_s_fx_efx(const struct cmp_cfg *cfg, uint32_t stream_len)
623
3.68k
{
624
3.68k
  size_t i;
625
626
3.68k
  const struct s_fx_efx *data_buf = cfg->src;
627
3.68k
  const struct s_fx_efx *model_buf = cfg->model_buf;
628
3.68k
  struct s_fx_efx *up_model_buf = NULL;
629
3.68k
  const struct s_fx_efx *next_model_p;
630
3.68k
  struct s_fx_efx model;
631
3.68k
  struct encoder_setup setup_exp_flag, setup_fx, setup_efx;
632
633
3.68k
  if (model_mode_is_used(cfg->cmp_mode)) {
634
1.52k
    model = model_buf[0];
635
1.52k
    next_model_p = &model_buf[1];
636
1.52k
    up_model_buf = cfg->updated_model_buf;
637
2.15k
  } else {
638
2.15k
    memset(&model, 0, sizeof(model));
639
2.15k
    next_model_p = data_buf;
640
2.15k
  }
641
642
3.68k
  configure_encoder_setup(&setup_exp_flag, cfg->cmp_par_exp_flags, cfg->spill_exp_flags,
643
3.68k
        cfg->round, MAX_USED_BITS.s_exp_flags, cfg);
644
3.68k
  configure_encoder_setup(&setup_fx, cfg->cmp_par_fx, cfg->spill_fx,
645
3.68k
        cfg->round, MAX_USED_BITS.s_fx, cfg);
646
3.68k
  configure_encoder_setup(&setup_efx, cfg->cmp_par_efx, cfg->spill_efx,
647
3.68k
        cfg->round, MAX_USED_BITS.s_efx, cfg);
648
649
10.2k
  for (i = 0;; i++) {
650
10.2k
    stream_len = encode_value(data_buf[i].exp_flags, model.exp_flags,
651
10.2k
            stream_len, &setup_exp_flag);
652
10.2k
    if (cmp_is_error(stream_len))
653
147
      break;
654
10.1k
    stream_len = encode_value(data_buf[i].fx, model.fx, stream_len,
655
10.1k
            &setup_fx);
656
10.1k
    if (cmp_is_error(stream_len))
657
402
      break;
658
9.74k
    stream_len = encode_value(data_buf[i].efx, model.efx,
659
9.74k
            stream_len, &setup_efx);
660
9.74k
    if (cmp_is_error(stream_len))
661
383
      return stream_len;
662
663
9.35k
    if (up_model_buf) {
664
2.75k
      up_model_buf[i].exp_flags = cmp_up_model(data_buf[i].exp_flags, model.exp_flags,
665
2.75k
        cfg->model_value, setup_exp_flag.lossy_par);
666
2.75k
      up_model_buf[i].fx = cmp_up_model(data_buf[i].fx, model.fx,
667
2.75k
        cfg->model_value, setup_fx.lossy_par);
668
2.75k
      up_model_buf[i].efx = cmp_up_model(data_buf[i].efx, model.efx,
669
2.75k
        cfg->model_value, setup_efx.lossy_par);
670
2.75k
    }
671
672
9.35k
    if (i >= cfg->samples-1)
673
2.75k
      break;
674
675
6.60k
    model = next_model_p[i];
676
6.60k
  }
677
3.30k
  return stream_len;
678
3.68k
}
679
680
681
/**
682
 * @brief compress S_FX_NCOB data
683
 *
684
 * @param cfg   pointer to the compression configuration structure
685
 * @param stream_len  already used length of the bitstream in bits
686
 *
687
 * @returns the bit length of the bitstream on success or an error code if it
688
 *  fails (which can be tested with cmp_is_error())
689
 */
690
691
static uint32_t compress_s_fx_ncob(const struct cmp_cfg *cfg, uint32_t stream_len)
692
3.01k
{
693
3.01k
  size_t i;
694
695
3.01k
  const struct s_fx_ncob *data_buf = cfg->src;
696
3.01k
  const struct s_fx_ncob *model_buf = cfg->model_buf;
697
3.01k
  struct s_fx_ncob *up_model_buf = NULL;
698
3.01k
  const struct s_fx_ncob *next_model_p;
699
3.01k
  struct s_fx_ncob model;
700
3.01k
  struct encoder_setup setup_exp_flag, setup_fx, setup_ncob;
701
702
3.01k
  if (model_mode_is_used(cfg->cmp_mode)) {
703
1.29k
    model = model_buf[0];
704
1.29k
    next_model_p = &model_buf[1];
705
1.29k
    up_model_buf = cfg->updated_model_buf;
706
1.72k
  } else {
707
1.72k
    memset(&model, 0, sizeof(model));
708
1.72k
    next_model_p = data_buf;
709
1.72k
  }
710
711
3.01k
  configure_encoder_setup(&setup_exp_flag, cfg->cmp_par_exp_flags, cfg->spill_exp_flags,
712
3.01k
        cfg->round, MAX_USED_BITS.s_exp_flags, cfg);
713
3.01k
  configure_encoder_setup(&setup_fx, cfg->cmp_par_fx, cfg->spill_fx,
714
3.01k
        cfg->round, MAX_USED_BITS.s_fx, cfg);
715
3.01k
  configure_encoder_setup(&setup_ncob, cfg->cmp_par_ncob, cfg->spill_ncob,
716
3.01k
        cfg->round, MAX_USED_BITS.s_ncob, cfg);
717
718
10.0k
  for (i = 0;; i++) {
719
10.0k
    stream_len = encode_value(data_buf[i].exp_flags, model.exp_flags,
720
10.0k
            stream_len, &setup_exp_flag);
721
10.0k
    if (cmp_is_error(stream_len))
722
97
      break;
723
9.90k
    stream_len = encode_value(data_buf[i].fx, model.fx, stream_len,
724
9.90k
            &setup_fx);
725
9.90k
    if (cmp_is_error(stream_len))
726
164
      break;
727
9.74k
    stream_len = encode_value(data_buf[i].ncob_x, model.ncob_x,
728
9.74k
            stream_len, &setup_ncob);
729
9.74k
    if (cmp_is_error(stream_len))
730
242
      break;
731
9.50k
    stream_len = encode_value(data_buf[i].ncob_y, model.ncob_y,
732
9.50k
            stream_len, &setup_ncob);
733
9.50k
    if (cmp_is_error(stream_len))
734
216
      break;
735
736
9.28k
    if (up_model_buf) {
737
2.85k
      up_model_buf[i].exp_flags = cmp_up_model(data_buf[i].exp_flags, model.exp_flags,
738
2.85k
        cfg->model_value, setup_exp_flag.lossy_par);
739
2.85k
      up_model_buf[i].fx = cmp_up_model(data_buf[i].fx, model.fx,
740
2.85k
        cfg->model_value, setup_fx.lossy_par);
741
2.85k
      up_model_buf[i].ncob_x = cmp_up_model(data_buf[i].ncob_x, model.ncob_x,
742
2.85k
        cfg->model_value, setup_ncob.lossy_par);
743
2.85k
      up_model_buf[i].ncob_y = cmp_up_model(data_buf[i].ncob_y, model.ncob_y,
744
2.85k
        cfg->model_value, setup_ncob.lossy_par);
745
2.85k
    }
746
747
9.28k
    if (i >= cfg->samples-1)
748
2.29k
      break;
749
750
6.98k
    model = next_model_p[i];
751
6.98k
  }
752
3.01k
  return stream_len;
753
3.01k
}
754
755
756
/**
757
 * @brief compress S_FX_EFX_NCOB_ECOB data
758
 *
759
 * @param cfg   pointer to the compression configuration structure
760
 * @param stream_len  already used length of the bitstream in bits
761
 *
762
 * @returns the bit length of the bitstream on success or an error code if it
763
 *  fails (which can be tested with cmp_is_error())
764
 */
765
766
static uint32_t compress_s_fx_efx_ncob_ecob(const struct cmp_cfg *cfg, uint32_t stream_len)
767
3.15k
{
768
3.15k
  size_t i;
769
770
3.15k
  const struct s_fx_efx_ncob_ecob *data_buf = cfg->src;
771
3.15k
  const struct s_fx_efx_ncob_ecob *model_buf = cfg->model_buf;
772
3.15k
  struct s_fx_efx_ncob_ecob *up_model_buf = NULL;
773
3.15k
  const struct s_fx_efx_ncob_ecob *next_model_p;
774
3.15k
  struct s_fx_efx_ncob_ecob model;
775
3.15k
  struct encoder_setup setup_exp_flag, setup_fx, setup_ncob, setup_efx,
776
3.15k
            setup_ecob;
777
778
3.15k
  if (model_mode_is_used(cfg->cmp_mode)) {
779
1.06k
    model = model_buf[0];
780
1.06k
    next_model_p = &model_buf[1];
781
1.06k
    up_model_buf = cfg->updated_model_buf;
782
2.09k
  } else {
783
2.09k
    memset(&model, 0, sizeof(model));
784
2.09k
    next_model_p = data_buf;
785
2.09k
  }
786
787
3.15k
  configure_encoder_setup(&setup_exp_flag, cfg->cmp_par_exp_flags, cfg->spill_exp_flags,
788
3.15k
        cfg->round, MAX_USED_BITS.s_exp_flags, cfg);
789
3.15k
  configure_encoder_setup(&setup_fx, cfg->cmp_par_fx, cfg->spill_fx,
790
3.15k
        cfg->round, MAX_USED_BITS.s_fx, cfg);
791
3.15k
  configure_encoder_setup(&setup_ncob, cfg->cmp_par_ncob, cfg->spill_ncob,
792
3.15k
        cfg->round, MAX_USED_BITS.s_ncob, cfg);
793
3.15k
  configure_encoder_setup(&setup_efx, cfg->cmp_par_efx, cfg->spill_efx,
794
3.15k
        cfg->round, MAX_USED_BITS.s_efx, cfg);
795
3.15k
  configure_encoder_setup(&setup_ecob, cfg->cmp_par_ecob, cfg->spill_ecob,
796
3.15k
        cfg->round, MAX_USED_BITS.s_ecob, cfg);
797
798
6.68k
  for (i = 0;; i++) {
799
6.68k
    stream_len = encode_value(data_buf[i].exp_flags, model.exp_flags,
800
6.68k
            stream_len, &setup_exp_flag);
801
6.68k
    if (cmp_is_error(stream_len))
802
105
      break;
803
6.57k
    stream_len = encode_value(data_buf[i].fx, model.fx, stream_len,
804
6.57k
            &setup_fx);
805
6.57k
    if (cmp_is_error(stream_len))
806
157
      break;
807
6.42k
    stream_len = encode_value(data_buf[i].ncob_x, model.ncob_x,
808
6.42k
            stream_len, &setup_ncob);
809
6.42k
    if (cmp_is_error(stream_len))
810
231
      break;
811
6.18k
    stream_len = encode_value(data_buf[i].ncob_y, model.ncob_y,
812
6.18k
            stream_len, &setup_ncob);
813
6.18k
    if (cmp_is_error(stream_len))
814
206
      break;
815
5.98k
    stream_len = encode_value(data_buf[i].efx, model.efx,
816
5.98k
            stream_len, &setup_efx);
817
5.98k
    if (cmp_is_error(stream_len))
818
343
      break;
819
5.64k
    stream_len = encode_value(data_buf[i].ecob_x, model.ecob_x,
820
5.64k
            stream_len, &setup_ecob);
821
5.64k
    if (cmp_is_error(stream_len))
822
211
      break;
823
5.42k
    stream_len = encode_value(data_buf[i].ecob_y, model.ecob_y,
824
5.42k
            stream_len, &setup_ecob);
825
5.42k
    if (cmp_is_error(stream_len))
826
205
      break;
827
828
5.22k
    if (up_model_buf) {
829
1.58k
      up_model_buf[i].exp_flags = cmp_up_model(data_buf[i].exp_flags, model.exp_flags,
830
1.58k
        cfg->model_value, setup_exp_flag.lossy_par);
831
1.58k
      up_model_buf[i].fx = cmp_up_model(data_buf[i].fx, model.fx,
832
1.58k
        cfg->model_value, setup_fx.lossy_par);
833
1.58k
      up_model_buf[i].ncob_x = cmp_up_model(data_buf[i].ncob_x, model.ncob_x,
834
1.58k
        cfg->model_value, setup_ncob.lossy_par);
835
1.58k
      up_model_buf[i].ncob_y = cmp_up_model(data_buf[i].ncob_y, model.ncob_y,
836
1.58k
        cfg->model_value, setup_ncob.lossy_par);
837
1.58k
      up_model_buf[i].efx = cmp_up_model(data_buf[i].efx, model.efx,
838
1.58k
        cfg->model_value, setup_efx.lossy_par);
839
1.58k
      up_model_buf[i].ecob_x = cmp_up_model(data_buf[i].ecob_x, model.ecob_x,
840
1.58k
        cfg->model_value, setup_ecob.lossy_par);
841
1.58k
      up_model_buf[i].ecob_y = cmp_up_model(data_buf[i].ecob_y, model.ecob_y,
842
1.58k
        cfg->model_value, setup_ecob.lossy_par);
843
1.58k
    }
844
845
5.22k
    if (i >= cfg->samples-1)
846
1.69k
      break;
847
848
3.52k
    model = next_model_p[i];
849
3.52k
  }
850
3.15k
  return stream_len;
851
3.15k
}
852
853
854
/**
855
 * @brief compress L_FX data
856
 *
857
 * @param cfg   pointer to the compression configuration structure
858
 * @param stream_len  already used length of the bitstream in bits
859
 *
860
 * @returns the bit length of the bitstream on success or an error code if it
861
 *  fails (which can be tested with cmp_is_error())
862
 */
863
864
static uint32_t compress_l_fx(const struct cmp_cfg *cfg, uint32_t stream_len)
865
8.30k
{
866
8.30k
  size_t i;
867
868
8.30k
  const struct l_fx *data_buf = cfg->src;
869
8.30k
  const struct l_fx *model_buf = cfg->model_buf;
870
8.30k
  struct l_fx *up_model_buf = NULL;
871
8.30k
  const struct l_fx *next_model_p;
872
8.30k
  struct l_fx model;
873
8.30k
  struct encoder_setup setup_exp_flag, setup_fx, setup_fx_var;
874
875
8.30k
  if (model_mode_is_used(cfg->cmp_mode)) {
876
5.80k
    model = model_buf[0];
877
5.80k
    next_model_p = &model_buf[1];
878
5.80k
    up_model_buf = cfg->updated_model_buf;
879
5.80k
  } else {
880
2.50k
    memset(&model, 0, sizeof(model));
881
2.50k
    next_model_p = data_buf;
882
2.50k
  }
883
884
8.30k
  configure_encoder_setup(&setup_exp_flag, cfg->cmp_par_exp_flags, cfg->spill_exp_flags,
885
8.30k
        cfg->round, MAX_USED_BITS.l_exp_flags, cfg);
886
8.30k
  configure_encoder_setup(&setup_fx, cfg->cmp_par_fx, cfg->spill_fx,
887
8.30k
        cfg->round, MAX_USED_BITS.l_fx, cfg);
888
8.30k
  configure_encoder_setup(&setup_fx_var, cfg->cmp_par_fx_cob_variance, cfg->spill_fx_cob_variance,
889
8.30k
        cfg->round, MAX_USED_BITS.l_fx_cob_variance, cfg);
890
891
12.5k
  for (i = 0;; i++) {
892
12.5k
    stream_len = encode_value(data_buf[i].exp_flags, model.exp_flags,
893
12.5k
            stream_len, &setup_exp_flag);
894
12.5k
    if (cmp_is_error(stream_len))
895
155
      break;
896
12.4k
    stream_len = encode_value(data_buf[i].fx, model.fx, stream_len,
897
12.4k
            &setup_fx);
898
12.4k
    if (cmp_is_error(stream_len))
899
414
      break;
900
12.0k
    stream_len = encode_value(data_buf[i].fx_variance, model.fx_variance,
901
12.0k
            stream_len, &setup_fx_var);
902
12.0k
    if (cmp_is_error(stream_len))
903
716
      break;
904
905
11.3k
    if (up_model_buf) {
906
5.89k
      up_model_buf[i].exp_flags = cmp_up_model32(data_buf[i].exp_flags, model.exp_flags,
907
5.89k
        cfg->model_value, setup_exp_flag.lossy_par);
908
5.89k
      up_model_buf[i].fx = cmp_up_model(data_buf[i].fx, model.fx,
909
5.89k
        cfg->model_value, setup_fx.lossy_par);
910
5.89k
      up_model_buf[i].fx_variance = cmp_up_model(data_buf[i].fx_variance, model.fx_variance,
911
5.89k
        cfg->model_value, setup_fx_var.lossy_par);
912
5.89k
    }
913
914
11.3k
    if (i >= cfg->samples-1)
915
7.02k
      break;
916
917
4.28k
    model = next_model_p[i];
918
4.28k
  }
919
8.30k
  return stream_len;
920
8.30k
}
921
922
923
/**
924
 * @brief compress L_FX_EFX data
925
 *
926
 * @param cfg   pointer to the compression configuration structure
927
 * @param stream_len  already used length of the bitstream in bits
928
 *
929
 * @returns the bit length of the bitstream on success or an error code if it
930
 *  fails (which can be tested with cmp_is_error())
931
 */
932
933
static uint32_t compress_l_fx_efx(const struct cmp_cfg *cfg, uint32_t stream_len)
934
1.79k
{
935
1.79k
  size_t i;
936
937
1.79k
  const struct l_fx_efx *data_buf = cfg->src;
938
1.79k
  const struct l_fx_efx *model_buf = cfg->model_buf;
939
1.79k
  struct l_fx_efx *up_model_buf = NULL;
940
1.79k
  const struct l_fx_efx *next_model_p;
941
1.79k
  struct l_fx_efx model;
942
1.79k
  struct encoder_setup setup_exp_flag, setup_fx, setup_efx, setup_fx_var;
943
944
1.79k
  if (model_mode_is_used(cfg->cmp_mode)) {
945
920
    model = model_buf[0];
946
920
    next_model_p = &model_buf[1];
947
920
    up_model_buf = cfg->updated_model_buf;
948
920
  } else {
949
879
    memset(&model, 0, sizeof(model));
950
879
    next_model_p = data_buf;
951
879
  }
952
953
1.79k
  configure_encoder_setup(&setup_exp_flag, cfg->cmp_par_exp_flags, cfg->spill_exp_flags,
954
1.79k
        cfg->round, MAX_USED_BITS.l_exp_flags, cfg);
955
1.79k
  configure_encoder_setup(&setup_fx, cfg->cmp_par_fx, cfg->spill_fx,
956
1.79k
        cfg->round, MAX_USED_BITS.l_fx, cfg);
957
1.79k
  configure_encoder_setup(&setup_efx, cfg->cmp_par_efx, cfg->spill_efx,
958
1.79k
        cfg->round, MAX_USED_BITS.l_efx, cfg);
959
1.79k
  configure_encoder_setup(&setup_fx_var, cfg->cmp_par_fx_cob_variance, cfg->spill_fx_cob_variance,
960
1.79k
        cfg->round, MAX_USED_BITS.l_fx_cob_variance, cfg);
961
962
9.00k
  for (i = 0;; i++) {
963
9.00k
    stream_len = encode_value(data_buf[i].exp_flags, model.exp_flags,
964
9.00k
            stream_len, &setup_exp_flag);
965
9.00k
    if (cmp_is_error(stream_len))
966
59
      break;
967
8.94k
    stream_len = encode_value(data_buf[i].fx, model.fx, stream_len,
968
8.94k
            &setup_fx);
969
8.94k
    if (cmp_is_error(stream_len))
970
139
      break;
971
8.80k
    stream_len = encode_value(data_buf[i].efx, model.efx,
972
8.80k
            stream_len, &setup_efx);
973
8.80k
    if (cmp_is_error(stream_len))
974
169
      break;
975
8.63k
    stream_len = encode_value(data_buf[i].fx_variance, model.fx_variance,
976
8.63k
            stream_len, &setup_fx_var);
977
8.63k
    if (cmp_is_error(stream_len))
978
173
      break;
979
980
8.46k
    if (up_model_buf) {
981
2.02k
      up_model_buf[i].exp_flags = cmp_up_model32(data_buf[i].exp_flags, model.exp_flags,
982
2.02k
        cfg->model_value, setup_exp_flag.lossy_par);
983
2.02k
      up_model_buf[i].fx = cmp_up_model(data_buf[i].fx, model.fx,
984
2.02k
        cfg->model_value, setup_fx.lossy_par);
985
2.02k
      up_model_buf[i].efx = cmp_up_model(data_buf[i].efx, model.efx,
986
2.02k
        cfg->model_value, setup_efx.lossy_par);
987
2.02k
      up_model_buf[i].fx_variance = cmp_up_model(data_buf[i].fx_variance, model.fx_variance,
988
2.02k
        cfg->model_value, setup_fx_var.lossy_par);
989
2.02k
    }
990
991
8.46k
    if (i >= cfg->samples-1)
992
1.25k
      break;
993
994
7.20k
    model = next_model_p[i];
995
7.20k
  }
996
1.79k
  return stream_len;
997
1.79k
}
998
999
1000
/**
1001
 * @brief compress L_FX_NCOB data
1002
 *
1003
 * @param cfg   pointer to the compression configuration structure
1004
 * @param stream_len  already used length of the bitstream in bits
1005
 *
1006
 * @returns the bit length of the bitstream on success or an error code if it
1007
 *  fails (which can be tested with cmp_is_error())
1008
 */
1009
1010
static uint32_t compress_l_fx_ncob(const struct cmp_cfg *cfg, uint32_t stream_len)
1011
1.47k
{
1012
1.47k
  size_t i;
1013
1014
1.47k
  const struct l_fx_ncob *data_buf = cfg->src;
1015
1.47k
  const struct l_fx_ncob *model_buf = cfg->model_buf;
1016
1.47k
  struct l_fx_ncob *up_model_buf = NULL;
1017
1.47k
  const struct l_fx_ncob *next_model_p;
1018
1.47k
  struct l_fx_ncob model;
1019
1.47k
  struct encoder_setup setup_exp_flag, setup_fx, setup_ncob,
1020
1.47k
            setup_fx_var, setup_cob_var;
1021
1022
1.47k
  if (model_mode_is_used(cfg->cmp_mode)) {
1023
222
    model = model_buf[0];
1024
222
    next_model_p = &model_buf[1];
1025
222
    up_model_buf = cfg->updated_model_buf;
1026
1.25k
  } else {
1027
1.25k
    memset(&model, 0, sizeof(model));
1028
1.25k
    next_model_p = data_buf;
1029
1.25k
  }
1030
1031
1.47k
  configure_encoder_setup(&setup_exp_flag, cfg->cmp_par_exp_flags, cfg->spill_exp_flags,
1032
1.47k
        cfg->round, MAX_USED_BITS.l_exp_flags, cfg);
1033
1.47k
  configure_encoder_setup(&setup_fx, cfg->cmp_par_fx, cfg->spill_fx,
1034
1.47k
        cfg->round, MAX_USED_BITS.l_fx, cfg);
1035
1.47k
  configure_encoder_setup(&setup_ncob, cfg->cmp_par_ncob, cfg->spill_ncob,
1036
1.47k
        cfg->round, MAX_USED_BITS.l_ncob, cfg);
1037
  /* we use the cmp_par_fx_cob_variance parameter for fx and cob variance data */
1038
1.47k
  configure_encoder_setup(&setup_fx_var, cfg->cmp_par_fx_cob_variance, cfg->spill_fx_cob_variance,
1039
1.47k
        cfg->round, MAX_USED_BITS.l_fx_cob_variance, cfg);
1040
1.47k
  configure_encoder_setup(&setup_cob_var, cfg->cmp_par_fx_cob_variance, cfg->spill_fx_cob_variance,
1041
1.47k
        cfg->round, MAX_USED_BITS.l_fx_cob_variance, cfg);
1042
1043
8.79k
  for (i = 0;; i++) {
1044
8.79k
    stream_len = encode_value(data_buf[i].exp_flags, model.exp_flags,
1045
8.79k
            stream_len, &setup_exp_flag);
1046
8.79k
    if (cmp_is_error(stream_len))
1047
75
      break;
1048
8.72k
    stream_len = encode_value(data_buf[i].fx, model.fx, stream_len,
1049
8.72k
            &setup_fx);
1050
8.72k
    if (cmp_is_error(stream_len))
1051
80
      break;
1052
8.64k
    stream_len = encode_value(data_buf[i].ncob_x, model.ncob_x,
1053
8.64k
            stream_len, &setup_ncob);
1054
8.64k
    if (cmp_is_error(stream_len))
1055
81
      break;
1056
8.56k
    stream_len = encode_value(data_buf[i].ncob_y, model.ncob_y,
1057
8.56k
            stream_len, &setup_ncob);
1058
8.56k
    if (cmp_is_error(stream_len))
1059
77
      break;
1060
8.48k
    stream_len = encode_value(data_buf[i].fx_variance, model.fx_variance,
1061
8.48k
            stream_len, &setup_fx_var);
1062
8.48k
    if (cmp_is_error(stream_len))
1063
82
      break;
1064
8.40k
    stream_len = encode_value(data_buf[i].cob_x_variance, model.cob_x_variance,
1065
8.40k
            stream_len, &setup_cob_var);
1066
8.40k
    if (cmp_is_error(stream_len))
1067
71
      break;
1068
8.33k
    stream_len = encode_value(data_buf[i].cob_y_variance, model.cob_y_variance,
1069
8.33k
            stream_len, &setup_cob_var);
1070
8.33k
    if (cmp_is_error(stream_len))
1071
78
      break;
1072
1073
8.25k
    if (up_model_buf) {
1074
674
      up_model_buf[i].exp_flags = cmp_up_model32(data_buf[i].exp_flags, model.exp_flags,
1075
674
        cfg->model_value, setup_exp_flag.lossy_par);
1076
674
      up_model_buf[i].fx = cmp_up_model(data_buf[i].fx, model.fx,
1077
674
        cfg->model_value, setup_fx.lossy_par);
1078
674
      up_model_buf[i].ncob_x = cmp_up_model(data_buf[i].ncob_x, model.ncob_x,
1079
674
        cfg->model_value, setup_ncob.lossy_par);
1080
674
      up_model_buf[i].ncob_y = cmp_up_model(data_buf[i].ncob_y, model.ncob_y,
1081
674
        cfg->model_value, setup_ncob.lossy_par);
1082
674
      up_model_buf[i].fx_variance = cmp_up_model(data_buf[i].fx_variance, model.fx_variance,
1083
674
        cfg->model_value, setup_fx_var.lossy_par);
1084
674
      up_model_buf[i].cob_x_variance = cmp_up_model(data_buf[i].cob_x_variance, model.cob_x_variance,
1085
674
        cfg->model_value, setup_cob_var.lossy_par);
1086
674
      up_model_buf[i].cob_y_variance = cmp_up_model(data_buf[i].cob_y_variance, model.cob_y_variance,
1087
674
        cfg->model_value, setup_cob_var.lossy_par);
1088
674
    }
1089
1090
8.25k
    if (i >= cfg->samples-1)
1091
935
      break;
1092
1093
7.32k
    model = next_model_p[i];
1094
7.32k
  }
1095
1.47k
  return stream_len;
1096
1.47k
}
1097
1098
1099
/**
1100
 * @brief compress L_FX_EFX_NCOB_ECOB data
1101
 *
1102
 * @param cfg   pointer to the compression configuration structure
1103
 * @param stream_len  already used length of the bitstream in bits
1104
 *
1105
 * @returns the bit length of the bitstream on success or an error code if it
1106
 *  fails (which can be tested with cmp_is_error())
1107
 */
1108
1109
static uint32_t compress_l_fx_efx_ncob_ecob(const struct cmp_cfg *cfg, uint32_t stream_len)
1110
3.62k
{
1111
3.62k
  size_t i;
1112
1113
3.62k
  const struct l_fx_efx_ncob_ecob *data_buf = cfg->src;
1114
3.62k
  const struct l_fx_efx_ncob_ecob *model_buf = cfg->model_buf;
1115
3.62k
  struct l_fx_efx_ncob_ecob *up_model_buf = NULL;
1116
3.62k
  const struct l_fx_efx_ncob_ecob *next_model_p;
1117
3.62k
  struct l_fx_efx_ncob_ecob model;
1118
3.62k
  struct encoder_setup setup_exp_flag, setup_fx, setup_ncob, setup_efx,
1119
3.62k
            setup_ecob, setup_fx_var, setup_cob_var;
1120
1121
3.62k
  if (model_mode_is_used(cfg->cmp_mode)) {
1122
1.20k
    model = model_buf[0];
1123
1.20k
    next_model_p = &model_buf[1];
1124
1.20k
    up_model_buf = cfg->updated_model_buf;
1125
2.41k
  } else {
1126
2.41k
    memset(&model, 0, sizeof(model));
1127
2.41k
    next_model_p = data_buf;
1128
2.41k
  }
1129
1130
3.62k
  configure_encoder_setup(&setup_exp_flag, cfg->cmp_par_exp_flags, cfg->spill_exp_flags,
1131
3.62k
        cfg->round, MAX_USED_BITS.l_exp_flags, cfg);
1132
3.62k
  configure_encoder_setup(&setup_fx, cfg->cmp_par_fx, cfg->spill_fx,
1133
3.62k
        cfg->round, MAX_USED_BITS.l_fx, cfg);
1134
3.62k
  configure_encoder_setup(&setup_ncob, cfg->cmp_par_ncob, cfg->spill_ncob,
1135
3.62k
        cfg->round, MAX_USED_BITS.l_ncob, cfg);
1136
3.62k
  configure_encoder_setup(&setup_efx, cfg->cmp_par_efx, cfg->spill_efx,
1137
3.62k
        cfg->round, MAX_USED_BITS.l_efx, cfg);
1138
3.62k
  configure_encoder_setup(&setup_ecob, cfg->cmp_par_ecob, cfg->spill_ecob,
1139
3.62k
        cfg->round, MAX_USED_BITS.l_ecob, cfg);
1140
  /* we use the same compression parameters for both variance data fields */
1141
3.62k
  configure_encoder_setup(&setup_fx_var, cfg->cmp_par_fx_cob_variance, cfg->spill_fx_cob_variance,
1142
3.62k
        cfg->round, MAX_USED_BITS.l_fx_cob_variance, cfg);
1143
3.62k
  configure_encoder_setup(&setup_cob_var, cfg->cmp_par_fx_cob_variance, cfg->spill_fx_cob_variance,
1144
3.62k
        cfg->round, MAX_USED_BITS.l_fx_cob_variance, cfg);
1145
1146
6.88k
  for (i = 0;; i++) {
1147
6.88k
    stream_len = encode_value(data_buf[i].exp_flags, model.exp_flags,
1148
6.88k
            stream_len, &setup_exp_flag);
1149
6.88k
    if (cmp_is_error(stream_len))
1150
40
      break;
1151
6.84k
    stream_len = encode_value(data_buf[i].fx, model.fx, stream_len,
1152
6.84k
            &setup_fx);
1153
6.84k
    if (cmp_is_error(stream_len))
1154
41
      break;
1155
6.80k
    stream_len = encode_value(data_buf[i].ncob_x, model.ncob_x,
1156
6.80k
            stream_len, &setup_ncob);
1157
6.80k
    if (cmp_is_error(stream_len))
1158
54
      break;
1159
6.75k
    stream_len = encode_value(data_buf[i].ncob_y, model.ncob_y,
1160
6.75k
            stream_len, &setup_ncob);
1161
6.75k
    if (cmp_is_error(stream_len))
1162
81
      break;
1163
6.67k
    stream_len = encode_value(data_buf[i].efx, model.efx,
1164
6.67k
            stream_len, &setup_efx);
1165
6.67k
    if (cmp_is_error(stream_len))
1166
91
      break;
1167
6.58k
    stream_len = encode_value(data_buf[i].ecob_x, model.ecob_x,
1168
6.58k
            stream_len, &setup_ecob);
1169
6.58k
    if (cmp_is_error(stream_len))
1170
120
      break;
1171
6.46k
    stream_len = encode_value(data_buf[i].ecob_y, model.ecob_y,
1172
6.46k
            stream_len, &setup_ecob);
1173
6.46k
    if (cmp_is_error(stream_len))
1174
111
      break;
1175
6.34k
    stream_len = encode_value(data_buf[i].fx_variance, model.fx_variance,
1176
6.34k
            stream_len, &setup_fx_var);
1177
6.34k
    if (cmp_is_error(stream_len))
1178
175
      break;
1179
6.17k
    stream_len = encode_value(data_buf[i].cob_x_variance, model.cob_x_variance,
1180
6.17k
            stream_len, &setup_cob_var);
1181
6.17k
    if (cmp_is_error(stream_len))
1182
210
      break;
1183
5.96k
    stream_len = encode_value(data_buf[i].cob_y_variance, model.cob_y_variance,
1184
5.96k
            stream_len, &setup_cob_var);
1185
5.96k
    if (cmp_is_error(stream_len))
1186
203
      break;
1187
1188
5.76k
    if (up_model_buf) {
1189
1.84k
      up_model_buf[i].exp_flags = cmp_up_model32(data_buf[i].exp_flags, model.exp_flags,
1190
1.84k
        cfg->model_value, setup_exp_flag.lossy_par);
1191
1.84k
      up_model_buf[i].fx = cmp_up_model(data_buf[i].fx, model.fx,
1192
1.84k
        cfg->model_value, setup_fx.lossy_par);
1193
1.84k
      up_model_buf[i].ncob_x = cmp_up_model(data_buf[i].ncob_x, model.ncob_x,
1194
1.84k
        cfg->model_value, setup_ncob.lossy_par);
1195
1.84k
      up_model_buf[i].ncob_y = cmp_up_model(data_buf[i].ncob_y, model.ncob_y,
1196
1.84k
        cfg->model_value, setup_ncob.lossy_par);
1197
1.84k
      up_model_buf[i].efx = cmp_up_model(data_buf[i].efx, model.efx,
1198
1.84k
        cfg->model_value, setup_efx.lossy_par);
1199
1.84k
      up_model_buf[i].ecob_x = cmp_up_model(data_buf[i].ecob_x, model.ecob_x,
1200
1.84k
        cfg->model_value, setup_ecob.lossy_par);
1201
1.84k
      up_model_buf[i].ecob_y = cmp_up_model(data_buf[i].ecob_y, model.ecob_y,
1202
1.84k
        cfg->model_value, setup_ecob.lossy_par);
1203
1.84k
      up_model_buf[i].fx_variance = cmp_up_model(data_buf[i].fx_variance, model.fx_variance,
1204
1.84k
        cfg->model_value, setup_fx_var.lossy_par);
1205
1.84k
      up_model_buf[i].cob_x_variance = cmp_up_model(data_buf[i].cob_x_variance, model.cob_x_variance,
1206
1.84k
        cfg->model_value, setup_cob_var.lossy_par);
1207
1.84k
      up_model_buf[i].cob_y_variance = cmp_up_model(data_buf[i].cob_y_variance, model.cob_y_variance,
1208
1.84k
        cfg->model_value, setup_cob_var.lossy_par);
1209
1.84k
    }
1210
1211
5.76k
    if (i >= cfg->samples-1)
1212
2.50k
      break;
1213
1214
3.26k
    model = next_model_p[i];
1215
3.26k
  }
1216
3.62k
  return stream_len;
1217
3.62k
}
1218
1219
1220
/**
1221
 * @brief compress offset data from the normal and fast cameras
1222
 *
1223
 * @param cfg   pointer to the compression configuration structure
1224
 * @param stream_len  already used length of the bitstream in bits
1225
 *
1226
 * @returns the bit length of the bitstream on success or an error code if it
1227
 *  fails (which can be tested with cmp_is_error())
1228
 */
1229
1230
static uint32_t compress_offset(const struct cmp_cfg *cfg, uint32_t stream_len)
1231
3.84k
{
1232
3.84k
  size_t i;
1233
1234
3.84k
  const struct offset *data_buf = cfg->src;
1235
3.84k
  const struct offset *model_buf = cfg->model_buf;
1236
3.84k
  struct offset *up_model_buf = NULL;
1237
3.84k
  const struct offset *next_model_p;
1238
3.84k
  struct offset model;
1239
3.84k
  struct encoder_setup setup_mean, setup_var;
1240
1241
3.84k
  if (model_mode_is_used(cfg->cmp_mode)) {
1242
1.30k
    model = model_buf[0];
1243
1.30k
    next_model_p = &model_buf[1];
1244
1.30k
    up_model_buf = cfg->updated_model_buf;
1245
2.54k
  } else {
1246
2.54k
    memset(&model, 0, sizeof(model));
1247
2.54k
    next_model_p = data_buf;
1248
2.54k
  }
1249
1250
3.84k
  {
1251
3.84k
    unsigned int mean_bits_used, variance_bits_used;
1252
1253
3.84k
    if (cfg->data_type == DATA_TYPE_F_CAM_OFFSET) {
1254
907
      mean_bits_used = MAX_USED_BITS.fc_offset_mean;
1255
907
      variance_bits_used = MAX_USED_BITS.fc_offset_variance;
1256
2.93k
    } else { /* DATA_TYPE_OFFSET */
1257
2.93k
      mean_bits_used = MAX_USED_BITS.nc_offset_mean;
1258
2.93k
      variance_bits_used = MAX_USED_BITS.nc_offset_variance;
1259
2.93k
    }
1260
1261
3.84k
    configure_encoder_setup(&setup_mean, cfg->cmp_par_offset_mean, cfg->spill_offset_mean,
1262
3.84k
          cfg->round, mean_bits_used, cfg);
1263
3.84k
    configure_encoder_setup(&setup_var, cfg->cmp_par_offset_variance, cfg->spill_offset_variance,
1264
3.84k
          cfg->round, variance_bits_used, cfg);
1265
3.84k
  }
1266
1267
11.1k
  for (i = 0;; i++) {
1268
11.1k
    stream_len = encode_value(data_buf[i].mean, model.mean,
1269
11.1k
            stream_len, &setup_mean);
1270
11.1k
    if (cmp_is_error(stream_len))
1271
337
      return stream_len;
1272
10.7k
    stream_len = encode_value(data_buf[i].variance, model.variance,
1273
10.7k
            stream_len, &setup_var);
1274
10.7k
    if (cmp_is_error(stream_len))
1275
616
      return stream_len;
1276
1277
10.1k
    if (up_model_buf) {
1278
3.06k
      up_model_buf[i].mean = cmp_up_model(data_buf[i].mean, model.mean,
1279
3.06k
        cfg->model_value, setup_mean.lossy_par);
1280
3.06k
      up_model_buf[i].variance = cmp_up_model(data_buf[i].variance, model.variance,
1281
3.06k
        cfg->model_value, setup_var.lossy_par);
1282
3.06k
    }
1283
1284
10.1k
    if (i >= cfg->samples-1)
1285
2.89k
      break;
1286
1287
7.29k
    model = next_model_p[i];
1288
7.29k
  }
1289
2.89k
  return stream_len;
1290
3.84k
}
1291
1292
1293
/**
1294
 * @brief compress background data from the normal and fast cameras
1295
 *
1296
 * @param cfg   pointer to the compression configuration structure
1297
 * @param stream_len  already used length of the bitstream in bits
1298
 *
1299
 * @returns the bit length of the bitstream on success or an error code if it
1300
 *  fails (which can be tested with cmp_is_error())
1301
 */
1302
1303
static uint32_t compress_background(const struct cmp_cfg *cfg, uint32_t stream_len)
1304
6.75k
{
1305
6.75k
  size_t i;
1306
1307
6.75k
  const struct background *data_buf = cfg->src;
1308
6.75k
  const struct background *model_buf = cfg->model_buf;
1309
6.75k
  struct background *up_model_buf = NULL;
1310
6.75k
  const struct background *next_model_p;
1311
6.75k
  struct background model;
1312
6.75k
  struct encoder_setup setup_mean, setup_var, setup_pix;
1313
1314
6.75k
  if (model_mode_is_used(cfg->cmp_mode)) {
1315
3.39k
    model = model_buf[0];
1316
3.39k
    next_model_p = &model_buf[1];
1317
3.39k
    up_model_buf = cfg->updated_model_buf;
1318
3.39k
  } else {
1319
3.35k
    memset(&model, 0, sizeof(model));
1320
3.35k
    next_model_p = data_buf;
1321
3.35k
  }
1322
1323
6.75k
  {
1324
6.75k
    unsigned int mean_used_bits, variance_used_bits, pixels_error_used_bits;
1325
1326
6.75k
    if (cfg->data_type == DATA_TYPE_F_CAM_BACKGROUND) {
1327
5.15k
      mean_used_bits = MAX_USED_BITS.fc_background_mean;
1328
5.15k
      variance_used_bits = MAX_USED_BITS.fc_background_variance;
1329
5.15k
      pixels_error_used_bits = MAX_USED_BITS.fc_background_outlier_pixels;
1330
5.15k
    } else { /* DATA_TYPE_BACKGROUND */
1331
1.59k
      mean_used_bits = MAX_USED_BITS.nc_background_mean;
1332
1.59k
      variance_used_bits = MAX_USED_BITS.nc_background_variance;
1333
1.59k
      pixels_error_used_bits = MAX_USED_BITS.nc_background_outlier_pixels;
1334
1.59k
    }
1335
6.75k
    configure_encoder_setup(&setup_mean, cfg->cmp_par_background_mean, cfg->spill_background_mean,
1336
6.75k
          cfg->round, mean_used_bits, cfg);
1337
6.75k
    configure_encoder_setup(&setup_var, cfg->cmp_par_background_variance, cfg->spill_background_variance,
1338
6.75k
          cfg->round, variance_used_bits, cfg);
1339
6.75k
    configure_encoder_setup(&setup_pix, cfg->cmp_par_background_pixels_error, cfg->spill_background_pixels_error,
1340
6.75k
          cfg->round, pixels_error_used_bits, cfg);
1341
6.75k
  }
1342
1343
13.2k
  for (i = 0;; i++) {
1344
13.2k
    stream_len = encode_value(data_buf[i].mean, model.mean,
1345
13.2k
            stream_len, &setup_mean);
1346
13.2k
    if (cmp_is_error(stream_len))
1347
490
      return stream_len;
1348
12.7k
    stream_len = encode_value(data_buf[i].variance, model.variance,
1349
12.7k
            stream_len, &setup_var);
1350
12.7k
    if (cmp_is_error(stream_len))
1351
1.44k
      return stream_len;
1352
11.3k
    stream_len = encode_value(data_buf[i].outlier_pixels, model.outlier_pixels,
1353
11.3k
            stream_len, &setup_pix);
1354
11.3k
    if (cmp_is_error(stream_len))
1355
364
      return stream_len;
1356
1357
10.9k
    if (up_model_buf) {
1358
5.78k
      up_model_buf[i].mean = cmp_up_model(data_buf[i].mean, model.mean,
1359
5.78k
        cfg->model_value, setup_mean.lossy_par);
1360
5.78k
      up_model_buf[i].variance = cmp_up_model(data_buf[i].variance, model.variance,
1361
5.78k
        cfg->model_value, setup_var.lossy_par);
1362
5.78k
      up_model_buf[i].outlier_pixels = cmp_up_model(data_buf[i].outlier_pixels, model.outlier_pixels,
1363
5.78k
        cfg->model_value, setup_pix.lossy_par);
1364
5.78k
    }
1365
1366
10.9k
    if (i >= cfg->samples-1)
1367
4.45k
      break;
1368
1369
6.53k
    model = next_model_p[i];
1370
6.53k
  }
1371
4.45k
  return stream_len;
1372
6.75k
}
1373
1374
1375
/**
1376
 * @brief compress smearing data from the normal cameras
1377
 *
1378
 * @param cfg   pointer to the compression configuration structure
1379
 * @param stream_len  already used length of the bitstream in bits
1380
 *
1381
 * @returns the bit length of the bitstream on success or an error code if it
1382
 *  fails (which can be tested with cmp_is_error())
1383
 */
1384
1385
static uint32_t compress_smearing(const struct cmp_cfg *cfg, uint32_t stream_len)
1386
5.69k
{
1387
5.69k
  size_t i;
1388
1389
5.69k
  const struct smearing *data_buf = cfg->src;
1390
5.69k
  const struct smearing *model_buf = cfg->model_buf;
1391
5.69k
  struct smearing *up_model_buf = NULL;
1392
5.69k
  const struct smearing *next_model_p;
1393
5.69k
  struct smearing model;
1394
5.69k
  struct encoder_setup setup_mean, setup_var_mean, setup_pix;
1395
1396
5.69k
  if (model_mode_is_used(cfg->cmp_mode)) {
1397
3.93k
    model = model_buf[0];
1398
3.93k
    next_model_p = &model_buf[1];
1399
3.93k
    up_model_buf = cfg->updated_model_buf;
1400
3.93k
  } else {
1401
1.76k
    memset(&model, 0, sizeof(model));
1402
1.76k
    next_model_p = data_buf;
1403
1.76k
  }
1404
1405
5.69k
  configure_encoder_setup(&setup_mean, cfg->cmp_par_smearing_mean, cfg->spill_smearing_mean,
1406
5.69k
        cfg->round, MAX_USED_BITS.smearing_mean, cfg);
1407
5.69k
  configure_encoder_setup(&setup_var_mean, cfg->cmp_par_smearing_variance, cfg->spill_smearing_variance,
1408
5.69k
        cfg->round, MAX_USED_BITS.smearing_variance_mean, cfg);
1409
5.69k
  configure_encoder_setup(&setup_pix, cfg->cmp_par_smearing_pixels_error, cfg->spill_smearing_pixels_error,
1410
5.69k
        cfg->round, MAX_USED_BITS.smearing_outlier_pixels, cfg);
1411
1412
14.9k
  for (i = 0;; i++) {
1413
14.9k
    stream_len = encode_value(data_buf[i].mean, model.mean,
1414
14.9k
            stream_len, &setup_mean);
1415
14.9k
    if (cmp_is_error(stream_len))
1416
379
      return stream_len;
1417
14.5k
    stream_len = encode_value(data_buf[i].variance_mean, model.variance_mean,
1418
14.5k
            stream_len, &setup_var_mean);
1419
14.5k
    if (cmp_is_error(stream_len))
1420
260
      return stream_len;
1421
14.3k
    stream_len = encode_value(data_buf[i].outlier_pixels, model.outlier_pixels,
1422
14.3k
            stream_len, &setup_pix);
1423
14.3k
    if (cmp_is_error(stream_len))
1424
216
      return stream_len;
1425
1426
14.1k
    if (up_model_buf) {
1427
9.27k
      up_model_buf[i].mean = cmp_up_model(data_buf[i].mean, model.mean,
1428
9.27k
        cfg->model_value, setup_mean.lossy_par);
1429
9.27k
      up_model_buf[i].variance_mean = cmp_up_model(data_buf[i].variance_mean, model.variance_mean,
1430
9.27k
        cfg->model_value, setup_var_mean.lossy_par);
1431
9.27k
      up_model_buf[i].outlier_pixels = cmp_up_model(data_buf[i].outlier_pixels, model.outlier_pixels,
1432
9.27k
        cfg->model_value, setup_pix.lossy_par);
1433
9.27k
    }
1434
1435
14.1k
    if (i >= cfg->samples-1)
1436
4.84k
      break;
1437
1438
9.25k
    model = next_model_p[i];
1439
9.25k
  }
1440
4.84k
  return stream_len;
1441
5.69k
}
1442
1443
1444
/**
1445
 * @brief check if two buffers are overlapping
1446
 * @see https://stackoverflow.com/a/325964
1447
 *
1448
 * @param buf_a   start address of the 1st buffer (can be NULL)
1449
 * @param size_a  byte size of the 1st buffer
1450
 * @param buf_b   start address of the 2nd buffer (can be NULL)
1451
 * @param size_b  byte size of the 2nd buffer
1452
 *
1453
 * @returns 0 if the buffers do not overlap, otherwise a non-zero
1454
 *  value (the buffers overlap)
1455
 */
1456
1457
static int buffer_overlaps(const void *buf_a, size_t size_a,
1458
         const void *buf_b, size_t size_b)
1459
440k
{
1460
440k
  if (!buf_a)
1461
67.8k
    return 0;
1462
1463
373k
  if (!buf_b)
1464
79.2k
    return 0;
1465
1466
293k
  if ((const char *)buf_a < (const char *)buf_b + size_b &&
1467
293k
      (const char *)buf_b < (const char *)buf_a + size_a)
1468
0
    return 1;
1469
1470
293k
  return 0;
1471
293k
}
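/*
 * Illustration only (not part of the library): the same half-open interval
 * test as above, exercised on two regions of one local array. The helper
 * name, region sizes and the assert-based checks are made up.
 */
#include <assert.h>
#include <stddef.h>

static int regions_overlap(const char *a, size_t size_a, const char *b, size_t size_b)
{
  return a < b + size_b && b < a + size_a;
}

static void overlap_example(void)
{
  char mem[32];

  assert(!regions_overlap(mem, 16, mem + 16, 16)); /* adjacent regions do not overlap */
  assert(regions_overlap(mem, 16, mem + 8, 16));   /* regions sharing 8 bytes do overlap */
}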
1472
1473
1474
/**
1475
 * @brief fill the last part of the bitstream with zeros
1476
 *
1477
 * @param cfg   pointer to the compression configuration structure
1478
 * @param cmp_size  length of the bitstream in bits
1479
 *
1480
 * @returns the bit length of the bitstream on success or an error code if it
1481
 *  fails (which can be tested with cmp_is_error())
1482
 */
1483
1484
static uint32_t pad_bitstream(const struct cmp_cfg *cfg, uint32_t cmp_size)
1485
97.1k
{
1486
97.1k
  unsigned int output_buf_len_bits, n_pad_bits;
1487
1488
97.1k
  if (!cfg->dst)
1489
62.7k
    return cmp_size;
1490
1491
  /* no padding in RAW mode; ALWAYS BIG-ENDIAN */
1492
34.4k
  if (cfg->cmp_mode == CMP_MODE_RAW)
1493
26.0k
    return cmp_size;
1494
1495
  /* maximum length of the bitstream in bits */
1496
8.34k
  output_buf_len_bits = cmp_stream_size_to_bits(cfg->stream_size);
1497
1498
8.34k
  n_pad_bits = 32 - (cmp_size & 0x1FU);
1499
8.34k
  if (n_pad_bits < 32) {
1500
7.41k
    FORWARD_IF_ERROR(put_n_bits32(0, n_pad_bits, cmp_size,
1501
7.41k
         cfg->dst, output_buf_len_bits), "");
1502
7.41k
  }
1503
1504
8.34k
  return cmp_size;
1505
8.34k
}
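/*
 * Worked example of the padding arithmetic above (numbers made up). Note
 * that pad_bitstream() returns the unpadded cmp_size either way; the
 * padding only zero-fills the rest of the last 32-bit word.
 */
#include <assert.h>

static void pad_arithmetic_example(void)
{
  unsigned int cmp_size = 70;                        /* bits already written */
  unsigned int n_pad_bits = 32 - (cmp_size & 0x1FU); /* 70 & 0x1F = 6 used bits */

  assert(n_pad_bits == 26); /* 70 + 26 = 96 bits, i.e. three full 32-bit words */

  cmp_size = 64;                                     /* already word aligned */
  n_pad_bits = 32 - (cmp_size & 0x1FU);
  assert(n_pad_bits == 32); /* the n_pad_bits < 32 guard skips the padding */
}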
1506
1507
1508
/**
1509
 * @brief internal data compression function
1510
 * This function can compress all types of collection data (one at a time).
1511
 * This function does not take the header of a collection into account.
1512
 *
1513
 * @param cfg   pointer to the compression configuration structure
1514
 * @param stream_len  already used length of the bitstream in bits
1515
 *
1516
 * @note the validity of the cfg structure is not checked
1517
 *
1518
 * @returns the bit length of the bitstream on success or an error code if it
1519
 *  fails (which can be tested with cmp_is_error())
1520
 */
1521
1522
static uint32_t compress_data_internal(const struct cmp_cfg *cfg, uint32_t stream_len)
1523
187k
{
1524
187k
  uint32_t bitsize = 0;
1525
1526
187k
  FORWARD_IF_ERROR(stream_len, "");
1527
187k
  RETURN_ERROR_IF(cfg == NULL, GENERIC, "");
1528
187k
  RETURN_ERROR_IF(stream_len & 0x7, GENERIC, "The stream_len parameter must be a multiple of 8.");
1529
1530
187k
  if (cfg->samples == 0) /* nothing to compress; we are done */
1531
65.5k
    return stream_len;
1532
1533
121k
  if (raw_mode_is_used(cfg->cmp_mode)) {
1534
52.2k
    uint32_t raw_size = cfg->samples * (uint32_t)size_of_a_sample(cfg->data_type);
1535
1536
52.2k
    if (cfg->dst) {
1537
26.0k
      uint32_t offset_bytes = stream_len >> 3;
1538
26.0k
      uint8_t *p = (uint8_t *)cfg->dst + offset_bytes;
1539
26.0k
      uint32_t new_stream_size = offset_bytes + raw_size;
1540
1541
26.0k
      RETURN_ERROR_IF(new_stream_size > cfg->stream_size, SMALL_BUFFER, "");
1542
26.0k
      memcpy(p, cfg->src, raw_size);
1543
26.0k
      RETURN_ERROR_IF(cpu_to_be_data_type(p, raw_size, cfg->data_type),
1544
26.0k
          INT_DATA_TYPE_UNSUPPORTED, "");
1545
26.0k
    }
1546
52.2k
    bitsize += stream_len + raw_size * 8; /* convert to bits */
1547
69.2k
  } else {
1548
69.2k
    switch (cfg->data_type) {
1549
4.49k
    case DATA_TYPE_IMAGETTE:
1550
4.49k
    case DATA_TYPE_IMAGETTE_ADAPTIVE:
1551
24.4k
    case DATA_TYPE_SAT_IMAGETTE:
1552
24.4k
    case DATA_TYPE_SAT_IMAGETTE_ADAPTIVE:
1553
25.3k
    case DATA_TYPE_F_CAM_IMAGETTE:
1554
25.3k
    case DATA_TYPE_F_CAM_IMAGETTE_ADAPTIVE:
1555
25.3k
      bitsize = compress_imagette(cfg, stream_len);
1556
25.3k
      break;
1557
1558
2.51k
    case DATA_TYPE_S_FX:
1559
2.51k
      bitsize = compress_s_fx(cfg, stream_len);
1560
2.51k
      break;
1561
3.68k
    case DATA_TYPE_S_FX_EFX:
1562
3.68k
      bitsize = compress_s_fx_efx(cfg, stream_len);
1563
3.68k
      break;
1564
3.01k
    case DATA_TYPE_S_FX_NCOB:
1565
3.01k
      bitsize = compress_s_fx_ncob(cfg, stream_len);
1566
3.01k
      break;
1567
3.15k
    case DATA_TYPE_S_FX_EFX_NCOB_ECOB:
1568
3.15k
      bitsize = compress_s_fx_efx_ncob_ecob(cfg, stream_len);
1569
3.15k
      break;
1570
1571
1572
8.30k
    case DATA_TYPE_L_FX:
1573
8.30k
      bitsize = compress_l_fx(cfg, stream_len);
1574
8.30k
      break;
1575
1.79k
    case DATA_TYPE_L_FX_EFX:
1576
1.79k
      bitsize = compress_l_fx_efx(cfg, stream_len);
1577
1.79k
      break;
1578
1.47k
    case DATA_TYPE_L_FX_NCOB:
1579
1.47k
      bitsize = compress_l_fx_ncob(cfg, stream_len);
1580
1.47k
      break;
1581
3.62k
    case DATA_TYPE_L_FX_EFX_NCOB_ECOB:
1582
3.62k
      bitsize = compress_l_fx_efx_ncob_ecob(cfg, stream_len);
1583
3.62k
      break;
1584
1585
2.93k
    case DATA_TYPE_OFFSET:
1586
3.84k
    case DATA_TYPE_F_CAM_OFFSET:
1587
3.84k
      bitsize = compress_offset(cfg, stream_len);
1588
3.84k
      break;
1589
1.59k
    case DATA_TYPE_BACKGROUND:
1590
6.75k
    case DATA_TYPE_F_CAM_BACKGROUND:
1591
6.75k
      bitsize = compress_background(cfg, stream_len);
1592
6.75k
      break;
1593
5.69k
    case DATA_TYPE_SMEARING:
1594
5.69k
      bitsize = compress_smearing(cfg, stream_len);
1595
5.69k
      break;
1596
1597
0
    case DATA_TYPE_F_FX:
1598
0
    case DATA_TYPE_F_FX_EFX:
1599
0
    case DATA_TYPE_F_FX_NCOB:
1600
0
    case DATA_TYPE_F_FX_EFX_NCOB_ECOB:
1601
0
    case DATA_TYPE_CHUNK:
1602
0
    case DATA_TYPE_UNKNOWN:
1603
0
    default:
1604
0
      RETURN_ERROR(INT_DATA_TYPE_UNSUPPORTED, "");
1605
69.2k
    }
1606
69.2k
  }
1607
1608
121k
  if (cmp_is_error(bitsize))
1609
24.3k
    return bitsize;
1610
1611
97.1k
  bitsize = pad_bitstream(cfg, bitsize);
1612
1613
97.1k
  return bitsize;
1614
121k
}
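/*
 * Size arithmetic sketch for the RAW branch above (numbers made up,
 * assuming 2-byte imagette samples): the data are copied verbatim, so the
 * resulting bit size is the old stream length plus samples * sample size
 * converted to bits.
 */
#include <assert.h>
#include <stdint.h>

static void raw_size_example(void)
{
  uint32_t samples = 100, sample_size = 2; /* e.g. 16-bit imagette samples */
  uint32_t stream_len = 32;                /* bits already in the stream */
  uint32_t raw_size = samples * sample_size;

  assert(stream_len + raw_size * 8 == 1632); /* 32 + 100 * 2 * 8 */
}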
1615
1616
1617
/**
1618
 * @brief check if the ICU buffer parameters are invalid
1619
 *
1620
 * @param cfg pointer to the compressor configuration to check
1621
 *
1622
 * @returns 0 if the buffer parameters are valid, otherwise an error code
1623
 */
1624
1625
static uint32_t check_compression_buffers(const struct cmp_cfg *cfg)
1626
116k
{
1627
116k
  size_t data_size;
1628
1629
116k
  RETURN_ERROR_IF(cfg == NULL, GENERIC, "");
1630
1631
116k
  RETURN_ERROR_IF(cfg->src == NULL, CHUNK_NULL, "");
1632
1633
116k
  data_size = size_of_a_sample(cfg->data_type) * cfg->samples;
1634
1635
116k
  if (cfg->samples == 0)
1636
44.4k
    debug_print("Warning: The samples parameter is 0. No data are compressed. This behavior may not be intended.");
1637
1638
116k
  RETURN_ERROR_IF(buffer_overlaps(cfg->dst, cfg->stream_size,
1639
116k
          cfg->src, data_size), PAR_BUFFERS,
1640
116k
    "The compressed data buffer and the data to compress buffer are overlapping.");
1641
1642
116k
  if (model_mode_is_used(cfg->cmp_mode)) {
1643
80.9k
    RETURN_ERROR_IF(cfg->model_buf == NULL, PAR_NO_MODEL, "");
1644
1645
80.9k
    RETURN_ERROR_IF(buffer_overlaps(cfg->model_buf, data_size,
1646
80.9k
            cfg->src, data_size), PAR_BUFFERS,
1647
80.9k
        "The model buffer and the data to compress buffer are overlapping.");
1648
80.9k
    RETURN_ERROR_IF(buffer_overlaps(cfg->model_buf, data_size,
1649
80.9k
            cfg->dst, cfg->stream_size), PAR_BUFFERS,
1650
80.9k
        "The model buffer and the compressed data buffer are overlapping.");
1651
1652
80.9k
    RETURN_ERROR_IF(buffer_overlaps(cfg->updated_model_buf, data_size,
1653
80.9k
            cfg->src, data_size), PAR_BUFFERS,
1654
80.9k
        "The updated model buffer and the data to compress buffer are overlapping.");
1655
80.9k
    RETURN_ERROR_IF(buffer_overlaps(cfg->updated_model_buf, data_size,
1656
80.9k
            cfg->dst, cfg->stream_size), PAR_BUFFERS,
1657
80.9k
        "The updated model buffer and the compressed data buffer are overlapping.");
1658
80.9k
  }
1659
1660
116k
  return CMP_ERROR(NO_ERROR);
1661
116k
}
1662
1663
1664
/**
1665
 * @brief checks if the ICU compression configuration is valid
1666
 *
1667
 * @param cfg pointer to the cmp_cfg structure to be validated
1668
 *
1669
 * @returns an error code if any of the configuration parameters are invalid,
1670
 *  otherwise returns CMP_ERROR_NO_ERROR on valid configuration
1671
 */
1672
1673
static uint32_t cmp_cfg_icu_is_invalid_error_code(const struct cmp_cfg *cfg)
1674
117k
{
1675
1676
117k
  RETURN_ERROR_IF(cmp_cfg_gen_par_is_invalid(cfg), PAR_GENERIC, "");
1677
1678
117k
  if (cmp_imagette_data_type_is_used(cfg->data_type))
1679
27.8k
    RETURN_ERROR_IF(cmp_cfg_imagette_is_invalid(cfg), PAR_SPECIFIC, "");
1680
89.2k
  else if (cmp_fx_cob_data_type_is_used(cfg->data_type))
1681
66.8k
    RETURN_ERROR_IF(cmp_cfg_fx_cob_is_invalid(cfg), PAR_SPECIFIC, "");
1682
22.4k
  else
1683
22.4k
    RETURN_ERROR_IF(cmp_cfg_aux_is_invalid(cfg), PAR_SPECIFIC, "");
1684
1685
116k
  FORWARD_IF_ERROR(check_compression_buffers(cfg), "");
1686
1687
116k
  return CMP_ERROR(NO_ERROR);
1688
116k
}
1689
1690
1691
/**
1692
 * @brief calculate the optimal spill threshold value for zero escape mechanism
1693
 *
1694
 * @param golomb_par  Golomb parameter
1695
 * @param max_data_bits maximum number of used data bits
1696
 *
1697
 * @returns the highest optimal spill threshold value for a given Golomb
1698
 *  parameter, when the zero escape mechanism is used or 0 if the
1699
 *  Golomb parameter is not valid
1700
 */
1701
1702
static uint32_t cmp_best_zero_spill(uint32_t golomb_par, uint32_t max_data_bits)
1703
16.9k
{
1704
16.9k
  uint32_t const max_spill = cmp_icu_max_spill(golomb_par);
1705
16.9k
  uint32_t cutoff;
1706
16.9k
  uint32_t spill;
1707
1708
16.9k
  if (golomb_par < MIN_NON_IMA_GOLOMB_PAR)
1709
1.37k
    return 0;
1710
15.5k
  if (golomb_par > MAX_NON_IMA_GOLOMB_PAR)
1711
1.28k
    return 0;
1712
1713
14.2k
  cutoff = (0x2U << ilog_2(golomb_par)) - golomb_par;
1714
14.2k
  spill = max_data_bits * golomb_par + cutoff;
1715
14.2k
  if (spill > max_spill)
1716
11.8k
    spill = max_spill;
1717
1718
14.2k
  return spill;
1719
15.5k
}
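/*
 * Worked example of the spill formula above, assuming ilog_2() returns the
 * floor of the base-2 logarithm; the clamp against cmp_icu_max_spill() is
 * left out because its value is not shown in this file.
 */
#include <assert.h>

static void zero_spill_example(void)
{
  unsigned int golomb_par = 4, max_data_bits = 16; /* example values */
  unsigned int cutoff = (0x2U << 2) - golomb_par;  /* ilog_2(4) == 2 -> 8 - 4 = 4 */
  unsigned int spill = max_data_bits * golomb_par + cutoff;

  assert(cutoff == 4);
  assert(spill == 68); /* would still be capped at cmp_icu_max_spill(4) */
}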
1720
1721
1722
/**
1723
 * @brief estimate a "good" spillover threshold parameter
1724
 *
1725
 * @param golomb_par  Golomb parameter
1726
 * @param cmp_mode  compression mode
1727
 * @param max_data_bits maximum number of used data bits
1728
 *
1729
 * @returns a spillover threshold parameter or 0 if the Golomb parameter is not
1730
 *  valid
1731
 */
1732
1733
static uint32_t cmp_get_spill(uint32_t golomb_par, enum cmp_mode cmp_mode,
1734
            uint32_t max_data_bits)
1735
28.4k
{
1736
28.4k
  if (zero_escape_mech_is_used(cmp_mode))
1737
16.9k
    return cmp_best_zero_spill(golomb_par, max_data_bits);
1738
1739
11.5k
  return cmp_icu_max_spill(golomb_par);
1740
28.4k
}
1741
1742
1743
/**
1744
 * @brief set the compressed collection size field
1745
 *
1746
 * @param cmp_col_size_field  pointer to the compressed collection size field
1747
 * @param cmp_col_size    size of the compressed collection (not including
1748
 *        the compressed collection header size and the
1749
 *        size of the compressed collection size field
1750
 *        itself)
1751
 *
1752
 * @returns error code
1753
 */
1754
1755
static uint32_t set_cmp_col_size(uint8_t *cmp_col_size_field, uint32_t cmp_col_size)
1756
53.3k
{
1757
53.3k
  uint16_t const v = cpu_to_be16((uint16_t)cmp_col_size);
1758
1759
53.3k
  RETURN_ERROR_IF(cmp_col_size > UINT16_MAX, INT_CMP_COL_TOO_LARGE,
1760
53.3k
      "%"PRIu32" is bigger than the maximum allowed compression collection size", cmp_col_size_field);
1761
1762
53.3k
  memcpy(cmp_col_size_field, &v, CMP_COLLECTION_FILD_SIZE);
1763
1764
53.3k
  return 0;
1765
53.3k
}
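/*
 * Sketch of what the size field ends up containing, assuming
 * CMP_COLLECTION_FILD_SIZE is 2 bytes (its value is not shown in this
 * file): the compressed collection size is stored big-endian, most
 * significant byte first, which is what cpu_to_be16() plus memcpy() yield.
 */
#include <assert.h>
#include <stdint.h>

static void col_size_field_example(void)
{
  uint8_t field[2];
  uint16_t cmp_col_size = 0x0123; /* compressed data bytes, header excluded */

  field[0] = (uint8_t)(cmp_col_size >> 8);
  field[1] = (uint8_t)(cmp_col_size & 0xFF);

  assert(field[0] == 0x01 && field[1] == 0x23);
}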
1766
1767
1768
/**
1769
 * @brief compresses a collection (with a collection header followed by data)
1770
 *
1771
 * @param col   pointer to a collection header
1772
 * @param model   pointer to the model to be used for compression, or NULL
1773
 *      if not applicable
1774
 * @param updated_model pointer to the buffer where the updated model will be
1775
 *      stored, or NULL if not applicable
1776
 * @param dst   pointer to the buffer where the compressed data will be
1777
 *      stored, or NULL to only get the compressed data size
1778
 * @param dst_capacity  the size of the dst buffer in bytes
1779
 * @param cfg   pointer to a compression configuration
1780
 * @param dst_size  the already used size of the dst buffer in bytes
1781
 *
1782
 * @returns the size of the compressed data in bytes (new dst_size) on
1783
 *  success or an error code if it fails (which can be tested with
1784
 *  cmp_is_error())
1785
 */
1786
static uint32_t cmp_collection(const uint8_t *col,
1787
             const uint8_t *model, uint8_t *updated_model,
1788
             uint32_t *dst, uint32_t dst_capacity,
1789
             struct cmp_cfg *cfg, uint32_t dst_size)
1790
114k
{
1791
114k
  uint32_t const dst_size_begin = dst_size;
1792
114k
  uint32_t dst_size_bits;
1793
114k
  const struct collection_hdr *col_hdr = (const struct collection_hdr *)col;
1794
114k
  uint16_t const col_data_length = cmp_col_get_data_length(col_hdr);
1795
114k
  uint16_t sample_size;
1796
1797
  /* sanity check of the collection header */
1798
114k
  cfg->data_type = convert_subservice_to_cmp_data_type(cmp_col_get_subservice(col_hdr));
1799
114k
  sample_size = (uint16_t)size_of_a_sample(cfg->data_type);
1800
114k
  RETURN_ERROR_IF(col_data_length % sample_size, COL_SIZE_INCONSISTENT,
1801
114k
      "col_data_length: %u %% sample_size: %u != 0", col_data_length, sample_size);
1802
114k
  cfg->samples = col_data_length/sample_size;
1803
1804
  /* prepare the different buffers */
1805
114k
  cfg->src = col + COLLECTION_HDR_SIZE;
1806
114k
  if (model)
1807
80.7k
    cfg->model_buf = model + COLLECTION_HDR_SIZE;
1808
114k
  if (updated_model)
1809
98.2k
    cfg->updated_model_buf = updated_model + COLLECTION_HDR_SIZE;
1810
114k
  cfg->dst = dst;
1811
114k
  cfg->stream_size = dst_capacity;
1812
114k
  FORWARD_IF_ERROR(cmp_cfg_icu_is_invalid_error_code(cfg), "");
1813
1814
113k
  if (cfg->cmp_mode != CMP_MODE_RAW) {
1815
    /* here we reserve space for the compressed data size field */
1816
108k
    dst_size += CMP_COLLECTION_FILD_SIZE;
1817
108k
  }
1818
1819
  /* we do not compress the collection header; we simply copy the header
1820
   * into the compressed data
1821
   */
1822
113k
  if (dst) {
1823
56.6k
    RETURN_ERROR_IF(dst_size + COLLECTION_HDR_SIZE > dst_capacity,
1824
56.6k
        SMALL_BUFFER, "");
1825
56.5k
    memcpy((uint8_t *)dst + dst_size, col, COLLECTION_HDR_SIZE);
1826
56.5k
  }
1827
113k
  dst_size += COLLECTION_HDR_SIZE;
1828
113k
  if (model_mode_is_used(cfg->cmp_mode) && updated_model)
1829
76.0k
    memcpy(updated_model, col, COLLECTION_HDR_SIZE);
1830
1831
  /* check if there is enough capacity in the dst buffer to store the data uncompressed */
1832
113k
  if ((dst == NULL || dst_capacity >= dst_size + col_data_length) &&
1833
113k
      cfg->cmp_mode != CMP_MODE_RAW) {
1834
    /* we set the compressed buffer size to the data size -1 to provoke
1835
     * a CMP_ERROR_SMALL_BUFFER error if the data are not compressible
1836
     */
1837
108k
    cfg->stream_size = dst_size + col_data_length - 1;
1838
108k
    dst_size_bits = compress_data_internal(cfg, dst_size << 3);
1839
1840
108k
    if (cmp_get_error_code(dst_size_bits) == CMP_ERROR_SMALL_BUFFER ||
1841
108k
        (!dst && dst_size_bits > cmp_stream_size_to_bits(cfg->stream_size))) { /* if dst == NULL compress_data_internal will not return a CMP_ERROR_SMALL_BUFFER */
1842
      /* can not compress the data with the given parameters;
1843
       * put them uncompressed (raw) into the dst buffer */
1844
70.2k
      enum cmp_mode cmp_mode_cpy = cfg->cmp_mode;
1845
1846
70.2k
      cfg->stream_size = dst_size + col_data_length;
1847
70.2k
      cfg->cmp_mode = CMP_MODE_RAW;
1848
70.2k
      dst_size_bits = compress_data_internal(cfg, dst_size << 3);
1849
70.2k
      cfg->cmp_mode = cmp_mode_cpy;
1850
      /* updated model is in this case a copy of the data to compress */
1851
70.2k
      if (model_mode_is_used(cfg->cmp_mode) && cfg->updated_model_buf)
1852
48.2k
        memcpy(cfg->updated_model_buf, cfg->src, col_data_length);
1853
70.2k
    }
1854
108k
  } else {
1855
5.58k
    cfg->stream_size = dst_capacity;
1856
5.58k
    dst_size_bits = compress_data_internal(cfg, dst_size << 3);
1857
5.58k
  }
1858
113k
  FORWARD_IF_ERROR(dst_size_bits, "compression failed");
1859
1860
113k
  dst_size = cmp_bit_to_byte(dst_size_bits);
1861
113k
  if (cfg->cmp_mode != CMP_MODE_RAW && dst) {
1862
53.3k
    uint8_t *cmp_col_size_field = (uint8_t *)dst+dst_size_begin;
1863
53.3k
    uint32_t cmp_col_size = dst_size - dst_size_begin -
1864
53.3k
      COLLECTION_HDR_SIZE - CMP_COLLECTION_FILD_SIZE;
1865
1866
53.3k
    FORWARD_IF_ERROR(set_cmp_col_size(cmp_col_size_field, cmp_col_size), "");
1867
53.3k
  }
1868
1869
113k
  return dst_size;
1870
113k
}
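/*
 * Based on the buffer handling above, each non-RAW collection appears in
 * the compressed stream as [2-byte size field][collection header][compressed
 * data], where the size field counts only the compressed data bytes.
 * Hypothetical sketch (assuming CMP_COLLECTION_FILD_SIZE == 2) of stepping
 * from one such entry to the next:
 */
#include <stdint.h>
#include <stddef.h>

static const uint8_t *next_cmp_collection(const uint8_t *entry, size_t collection_hdr_size)
{
  /* the size field is big-endian and excludes itself and the header */
  uint16_t cmp_data_size = (uint16_t)((entry[0] << 8) | entry[1]);

  return entry + 2 + collection_hdr_size + cmp_data_size;
}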
1871
1872
1873
/**
1874
 * @brief builds a compressed entity header for a compressed chunk
1875
 *
1876
 * @param entity    start address of the compression entity header
1877
 *        (can be NULL if you only want the entity header
1878
 *        size)
1879
 * @param chunk_size    the original size of the chunk in bytes
1880
 * @param cfg     pointer to the compression configuration used to
1881
 *        compress the chunk
1882
 * @param start_timestamp the start timestamp of the chunk compression
1883
 * @param cmp_ent_size_byte the size of the compression entity (entity
1884
 *        header plus compressed data)
1885
 *
1886
 * @return the size of the compressed entity header in bytes or an error code
1887
 *  if it fails (which can be tested with cmp_is_error())
1888
 */
1889
1890
static uint32_t cmp_ent_build_chunk_header(uint32_t *entity, uint32_t chunk_size,
1891
             const struct cmp_cfg *cfg, uint64_t start_timestamp,
1892
             uint32_t cmp_ent_size_byte)
1893
11.3k
{
1894
11.3k
  if (entity) { /* setup the compressed entity header */
1895
2.14k
    struct cmp_entity *ent = (struct cmp_entity *)entity;
1896
2.14k
    int err = 0;
1897
1898
2.14k
    err |= cmp_ent_set_version_id(ent, version_identifier); /* set by compress_chunk_init */
1899
2.14k
    err |= cmp_ent_set_size(ent, cmp_ent_size_byte);
1900
2.14k
    err |= cmp_ent_set_original_size(ent, chunk_size);
1901
2.14k
    err |= cmp_ent_set_data_type(ent, DATA_TYPE_CHUNK, cfg->cmp_mode == CMP_MODE_RAW);
1902
2.14k
    err |= cmp_ent_set_cmp_mode(ent, cfg->cmp_mode);
1903
2.14k
    err |= cmp_ent_set_model_value(ent, cfg->model_value);
1904
    /* model id/counter are set by the user with the compress_chunk_set_model_id_and_counter() */
1905
2.14k
    err |= cmp_ent_set_model_id(ent, 0);
1906
2.14k
    err |= cmp_ent_set_model_counter(ent, 0);
1907
2.14k
    err |= cmp_ent_set_reserved(ent, 0);
1908
2.14k
    err |= cmp_ent_set_lossy_cmp_par(ent, cfg->round);
1909
2.14k
    if (cfg->cmp_mode != CMP_MODE_RAW) {
1910
1.80k
      err |= cmp_ent_set_non_ima_spill1(ent, cfg->spill_par_1);
1911
1.80k
      err |= cmp_ent_set_non_ima_cmp_par1(ent, cfg->cmp_par_1);
1912
1.80k
      err |= cmp_ent_set_non_ima_spill2(ent, cfg->spill_par_2);
1913
1.80k
      err |= cmp_ent_set_non_ima_cmp_par2(ent, cfg->cmp_par_2);
1914
1.80k
      err |= cmp_ent_set_non_ima_spill3(ent, cfg->spill_par_3);
1915
1.80k
      err |= cmp_ent_set_non_ima_cmp_par3(ent, cfg->cmp_par_3);
1916
1.80k
      err |= cmp_ent_set_non_ima_spill4(ent, cfg->spill_par_4);
1917
1.80k
      err |= cmp_ent_set_non_ima_cmp_par4(ent, cfg->cmp_par_4);
1918
1.80k
      err |= cmp_ent_set_non_ima_spill5(ent, cfg->spill_par_5);
1919
1.80k
      err |= cmp_ent_set_non_ima_cmp_par5(ent, cfg->cmp_par_5);
1920
1.80k
      err |= cmp_ent_set_non_ima_spill6(ent, cfg->spill_par_6);
1921
1.80k
      err |= cmp_ent_set_non_ima_cmp_par6(ent, cfg->cmp_par_6);
1922
1.80k
    }
1923
2.14k
    RETURN_ERROR_IF(err, ENTITY_HEADER, "");
1924
1.65k
    RETURN_ERROR_IF(cmp_ent_set_start_timestamp(ent, start_timestamp),
1925
1.65k
        ENTITY_TIMESTAMP, "");
1926
1.65k
    RETURN_ERROR_IF(cmp_ent_set_end_timestamp(ent, get_timestamp()),
1927
1.65k
        ENTITY_TIMESTAMP, "");
1928
1.65k
  }
1929
1930
10.8k
  if (cfg->cmp_mode == CMP_MODE_RAW)
1931
1.08k
    return GENERIC_HEADER_SIZE;
1932
9.73k
  else
1933
9.73k
    return NON_IMAGETTE_HEADER_SIZE;
1934
10.8k
}
1935
1936
1937
/**
1938
 * @brief Set the compression configuration from the compression parameters
1939
 *  based on the chunk type of the collection
1940
 *
1941
 * @param[in] col pointer to a collection header
1942
 * @param[in] par pointer to a compression parameters struct
1943
 * @param[out] cfg  pointer to a compression configuration
1944
 *
1945
 * @returns the chunk type of the collection
1946
 */
1947
1948
static enum chunk_type init_cmp_cfg_from_cmp_par(const struct collection_hdr *col,
1949
             const struct cmp_par *par,
1950
             struct cmp_cfg *cfg)
1951
6.21k
{
1952
6.21k
  enum chunk_type chunk_type = cmp_col_get_chunk_type(col);
1953
1954
6.21k
  memset(cfg, 0, sizeof(struct cmp_cfg));
1955
1956
  /* the ranges of the parameters are checked in cmp_cfg_icu_is_invalid_error_code() */
1957
6.21k
  cfg->cmp_mode = par->cmp_mode;
1958
6.21k
  cfg->model_value = par->model_value;
1959
6.21k
  if (par->lossy_par)
1960
4.23k
    debug_print("Warning: lossy compression is not supported for chunk compression, lossy_par will be ignored.");
1961
6.21k
  cfg->round = 0;
1962
1963
6.21k
  switch (chunk_type) {
1964
322
  case CHUNK_TYPE_NCAM_IMAGETTE:
1965
322
    cfg->cmp_par_imagette = par->nc_imagette;
1966
322
    cfg->spill_imagette = cmp_get_spill(cfg->cmp_par_imagette, cfg->cmp_mode,
1967
322
                MAX_USED_BITS.nc_imagette);
1968
322
    break;
1969
697
  case CHUNK_TYPE_SAT_IMAGETTE:
1970
697
    cfg->cmp_par_imagette = par->saturated_imagette;
1971
697
    cfg->spill_imagette = cmp_get_spill(cfg->cmp_par_imagette, cfg->cmp_mode,
1972
697
                MAX_USED_BITS.saturated_imagette);
1973
697
    break;
1974
1.83k
  case CHUNK_TYPE_SHORT_CADENCE:
1975
1.83k
    cfg->cmp_par_exp_flags = par->s_exp_flags;
1976
1.83k
    cfg->spill_exp_flags = cmp_get_spill(cfg->cmp_par_exp_flags, cfg->cmp_mode,
1977
1.83k
                 MAX_USED_BITS.s_exp_flags);
1978
1.83k
    cfg->cmp_par_fx = par->s_fx;
1979
1.83k
    cfg->spill_fx = cmp_get_spill(cfg->cmp_par_fx, cfg->cmp_mode,
1980
1.83k
                MAX_USED_BITS.s_fx);
1981
1.83k
    cfg->cmp_par_ncob = par->s_ncob;
1982
1.83k
    cfg->spill_ncob = cmp_get_spill(cfg->cmp_par_ncob, cfg->cmp_mode,
1983
1.83k
            MAX_USED_BITS.s_ncob);
1984
1.83k
    cfg->cmp_par_efx = par->s_efx;
1985
1.83k
    cfg->spill_efx = cmp_get_spill(cfg->cmp_par_efx, cfg->cmp_mode,
1986
1.83k
                 MAX_USED_BITS.s_efx);
1987
1.83k
    cfg->cmp_par_ecob = par->s_ecob;
1988
1.83k
    cfg->spill_ecob = cmp_get_spill(cfg->cmp_par_ecob, cfg->cmp_mode,
1989
1.83k
            MAX_USED_BITS.s_ecob);
1990
1.83k
    break;
1991
1.72k
  case CHUNK_TYPE_LONG_CADENCE:
1992
1.72k
    cfg->cmp_par_exp_flags = par->l_exp_flags;
1993
1.72k
    cfg->spill_exp_flags = cmp_get_spill(cfg->cmp_par_exp_flags, cfg->cmp_mode,
1994
1.72k
                 MAX_USED_BITS.l_exp_flags);
1995
1.72k
    cfg->cmp_par_fx = par->l_fx;
1996
1.72k
    cfg->spill_fx = cmp_get_spill(cfg->cmp_par_fx, cfg->cmp_mode,
1997
1.72k
                MAX_USED_BITS.l_fx);
1998
1.72k
    cfg->cmp_par_ncob = par->l_ncob;
1999
1.72k
    cfg->spill_ncob = cmp_get_spill(cfg->cmp_par_ncob, cfg->cmp_mode,
2000
1.72k
            MAX_USED_BITS.l_ncob);
2001
1.72k
    cfg->cmp_par_efx = par->l_efx;
2002
1.72k
    cfg->spill_efx = cmp_get_spill(cfg->cmp_par_efx, cfg->cmp_mode,
2003
1.72k
                 MAX_USED_BITS.l_efx);
2004
1.72k
    cfg->cmp_par_ecob = par->l_ecob;
2005
1.72k
    cfg->spill_ecob = cmp_get_spill(cfg->cmp_par_ecob, cfg->cmp_mode,
2006
1.72k
            MAX_USED_BITS.l_ecob);
2007
1.72k
    cfg->cmp_par_fx_cob_variance = par->l_fx_cob_variance;
2008
1.72k
    cfg->spill_fx_cob_variance = cmp_get_spill(cfg->cmp_par_fx_cob_variance,
2009
1.72k
                 cfg->cmp_mode, MAX_USED_BITS.l_fx_cob_variance);
2010
1.72k
    break;
2011
432
  case CHUNK_TYPE_OFFSET_BACKGROUND:
2012
432
    cfg->cmp_par_offset_mean = par->nc_offset_mean;
2013
432
    cfg->spill_offset_mean = cmp_get_spill(cfg->cmp_par_offset_mean,
2014
432
            cfg->cmp_mode, MAX_USED_BITS.nc_offset_mean);
2015
432
    cfg->cmp_par_offset_variance = par->nc_offset_variance;
2016
432
    cfg->spill_offset_variance = cmp_get_spill(cfg->cmp_par_offset_variance,
2017
432
            cfg->cmp_mode, MAX_USED_BITS.nc_offset_variance);
2018
432
    cfg->cmp_par_background_mean = par->nc_background_mean;
2019
432
    cfg->spill_background_mean = cmp_get_spill(cfg->cmp_par_background_mean,
2020
432
            cfg->cmp_mode, MAX_USED_BITS.nc_background_mean);
2021
432
    cfg->cmp_par_background_variance = par->nc_background_variance;
2022
432
    cfg->spill_background_variance = cmp_get_spill(cfg->cmp_par_background_variance,
2023
432
            cfg->cmp_mode, MAX_USED_BITS.nc_background_variance);
2024
432
    cfg->cmp_par_background_pixels_error = par->nc_background_outlier_pixels;
2025
432
    cfg->spill_background_pixels_error = cmp_get_spill(cfg->cmp_par_background_pixels_error,
2026
432
            cfg->cmp_mode, MAX_USED_BITS.nc_background_outlier_pixels);
2027
432
    break;
2028
2029
463
  case CHUNK_TYPE_SMEARING:
2030
463
    cfg->cmp_par_smearing_mean = par->smearing_mean;
2031
463
    cfg->spill_smearing_mean = cmp_get_spill(cfg->cmp_par_smearing_mean,
2032
463
            cfg->cmp_mode, MAX_USED_BITS.smearing_mean);
2033
463
    cfg->cmp_par_smearing_variance = par->smearing_variance_mean;
2034
463
    cfg->spill_smearing_variance = cmp_get_spill(cfg->cmp_par_smearing_variance,
2035
463
            cfg->cmp_mode, MAX_USED_BITS.smearing_variance_mean);
2036
463
    cfg->cmp_par_smearing_pixels_error = par->smearing_outlier_pixels;
2037
463
    cfg->spill_smearing_pixels_error = cmp_get_spill(cfg->cmp_par_smearing_pixels_error,
2038
463
            cfg->cmp_mode, MAX_USED_BITS.smearing_outlier_pixels);
2039
463
    break;
2040
2041
725
  case CHUNK_TYPE_F_CHAIN:
2042
725
    cfg->cmp_par_imagette = par->fc_imagette;
2043
725
    cfg->spill_imagette = cmp_get_spill(cfg->cmp_par_imagette,
2044
725
            cfg->cmp_mode, MAX_USED_BITS.fc_imagette);
2045
2046
725
    cfg->cmp_par_offset_mean = par->fc_offset_mean;
2047
725
    cfg->spill_offset_mean = cmp_get_spill(cfg->cmp_par_offset_mean,
2048
725
            cfg->cmp_mode, MAX_USED_BITS.fc_offset_mean);
2049
725
    cfg->cmp_par_offset_variance = par->fc_offset_variance;
2050
725
    cfg->spill_offset_variance = cmp_get_spill(cfg->cmp_par_offset_variance,
2051
725
            cfg->cmp_mode, MAX_USED_BITS.fc_offset_variance);
2052
2053
725
    cfg->cmp_par_background_mean = par->fc_background_mean;
2054
725
    cfg->spill_background_mean = cmp_get_spill(cfg->cmp_par_background_mean,
2055
725
            cfg->cmp_mode, MAX_USED_BITS.fc_background_mean);
2056
725
    cfg->cmp_par_background_variance = par->fc_background_variance;
2057
725
    cfg->spill_background_variance = cmp_get_spill(cfg->cmp_par_background_variance,
2058
725
            cfg->cmp_mode, MAX_USED_BITS.fc_background_variance);
2059
725
    cfg->cmp_par_background_pixels_error = par->fc_background_outlier_pixels;
2060
725
    cfg->spill_background_pixels_error = cmp_get_spill(cfg->cmp_par_background_pixels_error,
2061
725
            cfg->cmp_mode, MAX_USED_BITS.fc_background_outlier_pixels);
2062
725
    break;
2063
18
  case CHUNK_TYPE_UNKNOWN:
2064
18
  default: /*
2065
      * default case never reached because cmp_col_get_chunk_type
2066
      * returns CHUNK_TYPE_UNKNOWN if the type is unknown
2067
      */
2068
18
    chunk_type = CHUNK_TYPE_UNKNOWN;
2069
18
    break;
2070
6.21k
  }
2071
2072
6.21k
  return chunk_type;
2073
6.21k
}
2074
2075
2076
/**
2077
 * @brief initialise the compress_chunk() function
2078
 *
2079
 * If not initialised, the compress_chunk() function sets the timestamps and
2080
 * version_id in the compression entity header to zero
2081
 *
2082
 * @param return_timestamp  pointer to a function returning a current 48-bit
2083
 *        timestamp
2084
 * @param version_id    application software version identifier
2085
 */
2086
2087
void compress_chunk_init(uint64_t (*return_timestamp)(void), uint32_t version_id)
2088
285
{
2089
285
  if (return_timestamp)
2090
285
    get_timestamp = return_timestamp;
2091
2092
285
  version_identifier = version_id;
2093
285
}
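/*
 * Minimal usage sketch; the timestamp source and version value are made up
 * and the public chunk compression header is assumed to be included.
 */
#include <stdint.h>

static uint64_t my_timestamp(void)
{
  return 0; /* placeholder: a real callback returns the current 48-bit timestamp */
}

static void init_example(void)
{
  compress_chunk_init(my_timestamp, 0x00010001 /* application SW version id */);
}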
2094
2095
2096
/**
2097
 * @brief compress a data chunk consisting of concatenated data collections
2098
 *
2099
 * @param chunk     pointer to the chunk to be compressed
2100
 * @param chunk_size    byte size of the chunk
2101
 * @param chunk_model   pointer to a model of a chunk; has the same size
2102
 *        as the chunk (can be NULL if no model compression
2103
 *        mode is used)
2104
 * @param updated_chunk_model pointer to store the updated model for the next
2105
 *        model mode compression; has the same size as the
2106
 *        chunk (can be the same as the model_of_data
2107
 *        buffer for in-place update or NULL if updated
2108
 *        model is not needed)
2109
 * @param dst     destination pointer to the compressed data
2110
 *        buffer; has to be 4-byte aligned; can be NULL to
2111
 *        only get the compressed data size
2112
 * @param dst_capacity    capacity of the dst buffer; it's recommended to
2113
 *        provide a dst_capacity >=
2114
 *        compress_chunk_cmp_size_bound(chunk, chunk_size)
2115
 *        as it eliminates one potential failure scenario:
2116
 *        not enough space in the dst buffer to write the
2117
 *        compressed data; size is internally rounded down
2118
 *        to a multiple of 4
2119
 * @param cmp_par   pointer to a compression parameters struct
2120
 * @returns the byte size of the compressed data or an error code if it
2121
 *  fails (which can be tested with cmp_is_error())
2122
 */
2123
2124
uint32_t compress_chunk(const void *chunk, uint32_t chunk_size,
2125
      const void *chunk_model, void *updated_chunk_model,
2126
      uint32_t *dst, uint32_t dst_capacity,
2127
      const struct cmp_par *cmp_par)
2128
6.50k
{
2129
6.50k
  uint64_t const start_timestamp = get_timestamp();
2130
6.50k
  const struct collection_hdr *col = (const struct collection_hdr *)chunk;
2131
6.50k
  enum chunk_type chunk_type;
2132
6.50k
  struct cmp_cfg cfg;
2133
6.50k
  uint32_t cmp_size_byte; /* size of the compressed data in bytes */
2134
6.50k
  size_t read_bytes;
2135
2136
6.50k
  RETURN_ERROR_IF(chunk == NULL, CHUNK_NULL, "");
2137
6.50k
  RETURN_ERROR_IF(cmp_par == NULL, PAR_NULL, "");
2138
6.22k
  RETURN_ERROR_IF(chunk_size < COLLECTION_HDR_SIZE, CHUNK_SIZE_INCONSISTENT,
2139
6.22k
      "chunk_size: %"PRIu32"", chunk_size);
2140
6.21k
  RETURN_ERROR_IF(chunk_size > CMP_ENTITY_MAX_ORIGINAL_SIZE, CHUNK_TOO_LARGE,
2141
6.21k
      "chunk_size: %"PRIu32"", chunk_size);
2142
2143
6.21k
  chunk_type = init_cmp_cfg_from_cmp_par(col, cmp_par, &cfg);
2144
6.21k
  RETURN_ERROR_IF(chunk_type == CHUNK_TYPE_UNKNOWN, COL_SUBSERVICE_UNSUPPORTED,
2145
6.21k
      "unsupported subservice: %u", cmp_col_get_subservice(col));
2146
2147
  /* reserve space for the compression entity header, we will build the
2148
   * header after the compression of the chunk
2149
   */
2150
6.20k
  cmp_size_byte = cmp_ent_build_chunk_header(NULL, chunk_size, &cfg, start_timestamp, 0);
2151
6.20k
  RETURN_ERROR_IF(dst && dst_capacity < cmp_size_byte, SMALL_BUFFER,
2152
6.20k
      "dst_capacity must be at least as large as the minimum size of the compression unit.");
2153
2154
2155
  /* compress one collection after another */
2156
6.19k
  for (read_bytes = 0;
2157
119k
       read_bytes <= chunk_size - COLLECTION_HDR_SIZE;
2158
114k
       read_bytes += cmp_col_get_size(col)) {
2159
114k
    const uint8_t *col_model = NULL;
2160
114k
    uint8_t *col_up_model = NULL;
2161
2162
    /* setup pointers for the next collection we want to compress */
2163
114k
    col = (const struct collection_hdr *)((const uint8_t *)chunk + read_bytes);
2164
114k
    if (chunk_model)
2165
80.9k
      col_model = (const uint8_t *)chunk_model + read_bytes;
2166
114k
    if (updated_chunk_model)
2167
98.4k
      col_up_model = (uint8_t *)updated_chunk_model + read_bytes;
2168
2169
114k
    RETURN_ERROR_IF(cmp_col_get_chunk_type(col) != chunk_type, CHUNK_SUBSERVICE_INCONSISTENT, "");
2170
2171
    /* chunk size is inconsistent with the sum of sizes in the collection headers */
2172
114k
    if (read_bytes + cmp_col_get_size(col) > chunk_size)
2173
30
      break;
2174
2175
114k
    cmp_size_byte = cmp_collection((const uint8_t *)col, col_model, col_up_model,
2176
114k
                 dst, dst_capacity, &cfg, cmp_size_byte);
2177
114k
    FORWARD_IF_ERROR(cmp_size_byte, "error occurred when compressing the collection with offset %u", read_bytes);
2178
114k
  }
2179
2180
5.35k
  RETURN_ERROR_IF(read_bytes != chunk_size, CHUNK_SIZE_INCONSISTENT, "");
2181
2182
5.11k
  FORWARD_IF_ERROR(cmp_ent_build_chunk_header(dst, chunk_size, &cfg,
2183
5.11k
              start_timestamp, cmp_size_byte), "");
2184
2185
4.62k
  return cmp_size_byte;
2186
5.11k
}
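/*
 * End-to-end usage sketch (buffer names are made up, error handling is kept
 * minimal, and the public chunk compression header is assumed to be
 * included). The capacity comes from compress_chunk_cmp_size_bound(),
 * defined further below.
 */
#include <stdint.h>
#include <stdlib.h>

static uint32_t compress_one_chunk(const void *chunk, uint32_t chunk_size,
           const void *model, void *updated_model,
           const struct cmp_par *par)
{
  uint32_t capacity = compress_chunk_cmp_size_bound(chunk, chunk_size);
  uint32_t *dst;
  uint32_t cmp_size;

  if (cmp_is_error(capacity))
    return capacity;

  dst = malloc(capacity); /* malloc() results are suitably (4-byte) aligned */
  if (!dst)
    return 0; /* allocation failed; a real caller would report this */

  cmp_size = compress_chunk(chunk, chunk_size, model, updated_model,
          dst, capacity, par);
  /* on success, the first cmp_size bytes of dst hold the compression entity */
  free(dst);
  return cmp_size;
}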
2187
2188
2189
/**
2190
 * @brief returns the maximum compressed size in a worst-case scenario,
2191
 * i.e. when the input data is not compressible.
2192
 * This function is primarily useful for memory allocation purposes
2193
 * (destination buffer size).
2194
 *
2195
 * @note if the number of collections is known you can use the
2196
 *  COMPRESS_CHUNK_BOUND macro for compilation-time evaluation
2197
 *  (stack memory allocation for example)
2198
 *
2199
 * @param chunk   pointer to the chunk you want to compress
2200
 * @param chunk_size  size of the chunk in bytes
2201
 *
2202
 * @returns maximum compressed size for a chunk compression on success or an
2203
 *  error code if it fails (which can be tested with cmp_is_error())
2204
 */
2205
2206
uint32_t compress_chunk_cmp_size_bound(const void *chunk, size_t chunk_size)
2207
4.37k
{
2208
4.37k
  int32_t read_bytes;
2209
4.37k
  uint32_t num_col = 0;
2210
4.37k
  size_t bound;
2211
4.37k
  size_t const max_chunk_size = CMP_ENTITY_MAX_ORIGINAL_SIZE
2212
4.37k
    - NON_IMAGETTE_HEADER_SIZE - CMP_COLLECTION_FILD_SIZE;
2213
2214
4.37k
  RETURN_ERROR_IF(chunk == NULL, CHUNK_NULL, "");
2215
4.37k
  RETURN_ERROR_IF(chunk_size < COLLECTION_HDR_SIZE, CHUNK_SIZE_INCONSISTENT, "");
2216
4.12k
  RETURN_ERROR_IF(chunk_size > max_chunk_size, CHUNK_TOO_LARGE,
2217
4.12k
      "chunk_size: %"PRIu32" > max_chunk_size: %"PRIu32"",
2218
4.12k
      chunk_size, max_chunk_size);
2219
2220
  /* count the number of collections in the chunk */
2221
4.12k
  for (read_bytes = 0;
2222
73.0k
       read_bytes <= (int32_t)(chunk_size-COLLECTION_HDR_SIZE);
2223
68.9k
       read_bytes += cmp_col_get_size((const struct collection_hdr *)
2224
68.9k
              ((const uint8_t *)chunk + read_bytes)))
2225
68.9k
    num_col++;
2226
2227
4.12k
  RETURN_ERROR_IF((uint32_t)read_bytes != chunk_size, CHUNK_SIZE_INCONSISTENT, "");
2228
2229
3.57k
  bound = COMPRESS_CHUNK_BOUND_UNSAFE(chunk_size, num_col);
2230
3.57k
  RETURN_ERROR_IF(bound > CMP_ENTITY_MAX_SIZE, CHUNK_TOO_LARGE, "bound: %lu", bound);
2231
2232
3.57k
  return (uint32_t)bound;
2233
3.57k
}
2234
2235
2236
/**
2237
 * @brief set the model id and model counter in the compression entity header
2238
 *
2239
 * @param dst   pointer to the compressed data (starting with a
2240
 *      compression entity header)
2241
 * @param dst_size  byte size of the dst buffer
2242
 * @param model_id  model identifier; for identifying entities that originate
2243
 *      from the same starting model
2244
 * @param model_counter model_counter; counts how many times the model was
2245
 *      updated; for non model mode compression use 0
2246
 *
2247
 * @returns the byte size of the dst buffer (= dst_size) on success or an error
2248
 *  code if it fails (which can be tested with cmp_is_error())
2249
 */
2250
2251
uint32_t compress_chunk_set_model_id_and_counter(void *dst, uint32_t dst_size,
2252
             uint16_t model_id, uint8_t model_counter)
2253
138
{
2254
138
  RETURN_ERROR_IF(dst == NULL, ENTITY_NULL, "");
2255
138
  FORWARD_IF_ERROR(dst_size, "");
2256
138
  RETURN_ERROR_IF(dst_size < GENERIC_HEADER_SIZE, ENTITY_TOO_SMALL,
2257
138
      "dst_size: %"PRIu32"", dst_size);
2258
2259
138
  cmp_ent_set_model_id(dst, model_id);
2260
138
  cmp_ent_set_model_counter(dst, model_counter);
2261
2262
138
  return dst_size;
2263
138
}
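/*
 * Usage sketch, continuing the compress_chunk() example: the model id and
 * counter values here are made up.
 */
static uint32_t tag_entity_example(void *dst, uint32_t cmp_size)
{
  return compress_chunk_set_model_id_and_counter(dst, cmp_size,
             0x0042 /* model_id */,
             3 /* model_counter */);
}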
2264
2265
2266
/**
2267
 * @brief compress data the same way as the RDCU HW compressor
2268
 *
2269
 * @param rcfg  pointer to a RDCU compression configuration (created with the
2270
 *    rdcu_cfg_create() function, set up with the rdcu_cfg_buffers()
2271
 *    and rdcu_cfg_imagette() functions)
2272
 * @param info  pointer to a compression information structure that contains the
2273
 *    metadata of a compression (can be NULL)
2274
 *
2275
 * @returns the bit length of the bitstream on success or an error code if it
2276
 *  fails (which can be tested with cmp_is_error())
2277
 *
2278
 * @warning only the small buffer error in the info.cmp_err field is implemented
2279
 */
2280
2281
uint32_t compress_like_rdcu(const struct rdcu_cfg *rcfg, struct cmp_info *info)
2282
3.02k
{
2283
3.02k
  struct cmp_cfg cfg;
2284
3.02k
  uint32_t cmp_size_bit;
2285
2286
3.02k
  memset(&cfg, 0, sizeof(cfg));
2287
2288
3.02k
  if (info)
2289
175
    memset(info, 0, sizeof(*info));
2290
2291
3.02k
  if (!rcfg)
2292
0
    return compress_data_internal(NULL, 0);
2293
2294
3.02k
  cfg.data_type = DATA_TYPE_IMAGETTE;
2295
2296
3.02k
  cfg.src = rcfg->input_buf;
2297
3.02k
  cfg.model_buf = rcfg->model_buf;
2298
3.02k
  cfg.samples = rcfg->samples;
2299
3.02k
  cfg.stream_size = (rcfg->buffer_length * sizeof(uint16_t));
2300
3.02k
  cfg.cmp_mode = rcfg->cmp_mode;
2301
3.02k
  cfg.model_value = rcfg->model_value;
2302
3.02k
  cfg.round = rcfg->round;
2303
2304
3.02k
  if (info) {
2305
175
    info->cmp_err = 0;
2306
175
    info->cmp_mode_used = (uint8_t)rcfg->cmp_mode;
2307
175
    info->model_value_used = (uint8_t)rcfg->model_value;
2308
175
    info->round_used = (uint8_t)rcfg->round;
2309
175
    info->spill_used = rcfg->spill;
2310
175
    info->golomb_par_used = rcfg->golomb_par;
2311
175
    info->samples_used = rcfg->samples;
2312
175
    info->rdcu_new_model_adr_used = rcfg->rdcu_new_model_adr;
2313
175
    info->rdcu_cmp_adr_used = rcfg->rdcu_buffer_adr;
2314
175
    info->cmp_size = 0;
2315
175
    info->ap1_cmp_size = 0;
2316
175
    info->ap2_cmp_size = 0;
2317
2318
175
    cfg.cmp_par_imagette = rcfg->ap1_golomb_par;
2319
175
    cfg.spill_imagette = rcfg->ap1_spill;
2320
175
    if (cfg.cmp_par_imagette &&
2321
175
        cmp_cfg_icu_is_invalid_error_code(&cfg) == CMP_ERROR_NO_ERROR)
2322
9
      info->ap1_cmp_size = compress_data_internal(&cfg, 0);
2323
2324
2325
175
    cfg.cmp_par_imagette = rcfg->ap2_golomb_par;
2326
175
    cfg.spill_imagette = rcfg->ap2_spill;
2327
175
    if (cfg.cmp_par_imagette &&
2328
175
        cmp_cfg_icu_is_invalid_error_code(&cfg) == CMP_ERROR_NO_ERROR)
2329
30
      info->ap2_cmp_size = compress_data_internal(&cfg, 0);
2330
175
  }
2331
2332
3.02k
  cfg.cmp_par_imagette = rcfg->golomb_par;
2333
3.02k
  cfg.spill_imagette = rcfg->spill;
2334
3.02k
  cfg.updated_model_buf = rcfg->icu_new_model_buf;
2335
3.02k
  cfg.dst = rcfg->icu_output_buf;
2336
2337
3.02k
  FORWARD_IF_ERROR(cmp_cfg_icu_is_invalid_error_code(&cfg), "");
2338
2339
2.99k
  cmp_size_bit = compress_data_internal(&cfg, 0);
2340
2341
2.99k
  if (info) {
2342
138
    if (cmp_get_error_code(cmp_size_bit) == CMP_ERROR_SMALL_BUFFER)
2343
22
      info->cmp_err |= 1UL << 0;/* SMALL_BUFFER_ERR_BIT;*/ /* set small buffer error */
2344
138
    if (cmp_is_error(cmp_size_bit)) {
2345
22
      info->cmp_size = 0;
2346
22
      info->ap1_cmp_size = 0;
2347
22
      info->ap2_cmp_size = 0;
2348
116
    } else {
2349
116
      info->cmp_size = cmp_size_bit;
2350
116
    }
2351
138
  }
2352
2353
2.99k
  return cmp_size_bit;
2354
3.02k
}
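/*
 * Result-handling sketch; rcfg is assumed to have been prepared with
 * rdcu_cfg_create()/rdcu_cfg_buffers()/rdcu_cfg_imagette() as described in
 * the comment above.
 */
static void rdcu_like_example(const struct rdcu_cfg *rcfg)
{
  struct cmp_info info;
  uint32_t cmp_size_bit = compress_like_rdcu(rcfg, &info);

  if (cmp_is_error(cmp_size_bit))
    return; /* info.cmp_err has the small-buffer bit set if that was the cause */

  /* info.cmp_size now holds the bit length of the compressed bitstream */
}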