Coverage Report

Created: 2025-06-15 00:57

/src/cmp_tool/lib/icu_compress/cmp_icu.c
Line
Count
Source
1
/**
2
 * @file   cmp_icu.c
3
 * @author Dominik Loidolt (dominik.loidolt@univie.ac.at)
4
 * @date   2020
5
 *
6
 * @copyright GPLv2
7
 * This program is free software; you can redistribute it and/or modify it
8
 * under the terms and conditions of the GNU General Public License,
9
 * version 2, as published by the Free Software Foundation.
10
 *
11
 * This program is distributed in the hope it will be useful, but WITHOUT
12
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14
 * more details.
15
 *
16
 * @brief software compression library
17
 * @see Data Compression User Manual PLATO-UVIE-PL-UM-0001
18
 */
19
20
21
#include <stdint.h>
22
#include <string.h>
23
#include <limits.h>
24
25
#include "../common/byteorder.h"
26
#include "../common/compiler.h"
27
#include "../common/cmp_debug.h"
28
#include "../common/cmp_data_types.h"
29
#include "../common/cmp_support.h"
30
#include "../common/cmp_cal_up_model.h"
31
#include "../common/cmp_max_used_bits.h"
32
#include "../common/cmp_entity.h"
33
#include "../common/cmp_error.h"
34
#include "../common/cmp_error_list.h"
35
#include "../common/leon_inttypes.h"
36
#include "cmp_chunk_type.h"
37
38
#include "../cmp_icu.h"
39
#include "../cmp_chunk.h"
40
41
42
/**
43
 * @brief default implementation of the get_timestamp() function
44
 *
45
 * @returns 0
46
 */
47
48
static uint64_t default_get_timestamp(void)
49
5.67k
{
50
5.67k
  return 0;
51
5.67k
}
52
53
54
/**
55
 * @brief function pointer to a function returning the current PLATO timestamp
56
 *  initialised with the compress_chunk_init() function
57
 */
58
59
static uint64_t (*get_timestamp)(void) = default_get_timestamp;
60
61
62
/**
63
 * @brief holds the version_identifier for the compression header
64
 *  initialised with the compress_chunk_init() function
65
 */
66
67
static uint32_t version_identifier;
68
69
70
/**
71
 * @brief structure to hold a setup to encode a value
72
 */
73
74
struct encoder_setup {
75
  uint32_t (*generate_cw_f)(uint32_t value, uint32_t encoder_par1,
76
          uint32_t encoder_par2, uint32_t *cw); /**< function pointer to a code word encoder */
77
  uint32_t (*encode_method_f)(uint32_t data, uint32_t model, uint32_t stream_len,
78
            const struct encoder_setup *setup); /**< pointer to the encoding function */
79
  uint32_t *bitstream_adr; /**< start address of the compressed data bitstream */
80
  uint32_t max_stream_len; /**< maximum length of the bitstream in bits */
81
  uint32_t encoder_par1;   /**< encoding parameter 1 */
82
  uint32_t encoder_par2;   /**< encoding parameter 2 */
83
  uint32_t spillover_par;  /**< outlier parameter */
84
  uint32_t lossy_par;      /**< lossy compression parameter */
85
  uint32_t max_data_bits;  /**< how many bits are needed to represent the highest possible value */
86
};
87
88
89
/**
90
 * @brief map a signed value into a positive value range
91
 *
92
 * @param value_to_map  signed value to map
93
 * @param max_data_bits how many bits are needed to represent the
94
 *      highest possible value
95
 *
96
 * @returns the positive mapped value
97
 */
98
99
static uint32_t map_to_pos(uint32_t value_to_map, unsigned int max_data_bits)
100
397k
{
101
397k
  uint32_t const mask = (~0U >> (32 - max_data_bits)); /* mask the used bits */
102
397k
  uint32_t result;
103
104
397k
  value_to_map &= mask;
105
397k
  if (value_to_map >> (max_data_bits - 1)) { /* check the leading signed bit */
106
138k
    value_to_map |= ~mask; /* convert to 32-bit signed integer */
107
    /* map negative values to uneven numbers */
108
138k
    result = (-value_to_map) * 2 - 1; /* possible integer overflow is intended */
109
258k
  } else {
110
    /* map positive values to even numbers */
111
258k
    result = value_to_map * 2; /* possible integer overflow is intended */
112
258k
  }
113
114
397k
  return result;
115
397k
}
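
A minimal stand-alone sketch of this mapping (hypothetical code, not part of cmp_icu.c; map_to_pos_sketch and the sample values are illustrative only). It shows how small residuals of either sign are folded into small unsigned numbers, which is what keeps the Rice/Golomb code words short:

#include <stdio.h>
#include <stdint.h>

/* illustrative copy of the mapping above; 0 -> 0, -1 -> 1, 1 -> 2, -2 -> 3, ... */
static uint32_t map_to_pos_sketch(uint32_t v, unsigned int max_data_bits)
{
  uint32_t const mask = ~0U >> (32 - max_data_bits);

  v &= mask;
  if (v >> (max_data_bits - 1)) {        /* sign bit of the narrow value is set */
    v |= ~mask;                          /* sign-extend to 32 bits */
    return (uint32_t)(-v) * 2 - 1;       /* negative values become odd numbers */
  }
  return v * 2;                          /* positive values become even numbers */
}

int main(void)
{
  int32_t const residuals[] = { 0, -1, 1, -2, 2, 32767, -32768 };
  size_t i;

  for (i = 0; i < sizeof(residuals)/sizeof(residuals[0]); i++)
    printf("%6d -> %u\n", (int)residuals[i],
           (unsigned int)map_to_pos_sketch((uint32_t)residuals[i], 16));
  return 0;
}
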
116
117
118
/**
119
 * @brief put the value of up to 32 bits into a big-endian bitstream
120
 *
121
 * @param value     the value to put into the bitstream
122
 * @param n_bits    number of bits to put into the bitstream
123
 * @param bit_offset    bit index where the bits will be put, seen from
124
 *        the very beginning of the bitstream
125
 * @param bitstream_adr   this is the pointer to the beginning of the
126
 *        bitstream (can be NULL)
127
 * @param max_stream_len  maximum length of the bitstream in *bits*; is
128
 *        ignored if bitstream_adr is NULL
129
 *
130
 * @returns the length of the generated bitstream in bits on success or an error
131
 *          code (which can be tested with cmp_is_error()) in the event of an
132
 *          incorrect input or if the bitstream buffer is too small to put the
133
 *          value in the bitstream.
134
 */
135
136
static uint32_t put_n_bits32(uint32_t value, unsigned int n_bits, uint32_t bit_offset,
137
           uint32_t *bitstream_adr, unsigned int max_stream_len)
138
584k
{
139
  /*
140
   *                               UNSEGMENTED
141
   * |-----------|XXXXXX|---------------|--------------------------------|
142
   * |-bits_left-|n_bits|-------------------bits_right-------------------|
143
   * ^
144
   * local_adr
145
   *                               SEGMENTED
146
   * |-----------------------------|XXX|XXX|-----------------------------|
147
   * |----------bits_left----------|n_bits-|---------bits_right----------|
148
   */
149
584k
  uint32_t const bits_left = bit_offset & 0x1F;
150
584k
  uint32_t const bits_right = 64 - bits_left - n_bits;
151
584k
  uint32_t const shift_left = 32 - n_bits;
152
584k
  uint32_t const stream_len = n_bits + bit_offset; /* no check for overflow */
153
584k
  uint32_t *local_adr;
154
584k
  uint32_t mask, tmp;
155
156
  /* Leave in case of erroneous input */
157
584k
  RETURN_ERROR_IF((int)shift_left < 0, INT_DECODER, "cannot insert more than 32 bits into the bit stream");  /* check n_bits <= 32 */
158
159
584k
  if (n_bits == 0)
160
0
    return stream_len;
161
162
584k
  if (!bitstream_adr)  /* Do we need to write data to the bitstream? */
163
309k
    return stream_len;
164
165
  /* Check if the bitstream buffer is large enough */
166
274k
  if (stream_len > max_stream_len)
167
18.0k
    return CMP_ERROR(SMALL_BUFFER);
168
169
256k
  local_adr = bitstream_adr + (bit_offset >> 5);
170
171
  /* clear the destination with inverse mask */
172
256k
  mask = (0XFFFFFFFFU << shift_left) >> bits_left;
173
256k
  tmp = be32_to_cpu(*local_adr) & ~mask;
174
175
  /* put (the first part of) the value into the bitstream */
176
256k
  tmp |= (value << shift_left) >> bits_left;
177
256k
  *local_adr = cpu_to_be32(tmp);
178
179
  /* Do we need to split the value over two words (SEGMENTED case) */
180
256k
  if (bits_right < 32) {
181
113k
    local_adr++;  /* adjust address */
182
183
    /* clear the destination */
184
113k
    mask = 0XFFFFFFFFU << bits_right;
185
113k
    tmp = be32_to_cpu(*local_adr) & ~mask;
186
187
    /* put the 2nd part of the value into the bitstream */
188
113k
    tmp |= value << bits_right;
189
113k
    *local_adr = cpu_to_be32(tmp);
190
113k
  }
191
256k
  return stream_len;
192
274k
}
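
The self-contained sketch below (illustrative only; put_bits_sketch is a made-up name and not part of the library API) demonstrates the MSB-first, big-endian packing convention that put_n_bits32() implements, leaving out the 32-bit word access and the error handling of the real function:

#include <stdio.h>
#include <stdint.h>

/* append the n_bits least significant bits of value, MSB first, at bit_offset */
static unsigned int put_bits_sketch(uint32_t value, unsigned int n_bits,
            unsigned int bit_offset, uint8_t *buf)
{
  unsigned int i;

  for (i = 0; i < n_bits; i++) {
    unsigned int const bit = (value >> (n_bits - 1 - i)) & 1U;
    unsigned int const pos = bit_offset + i;

    if (bit)
      buf[pos / 8] |= (uint8_t)(0x80U >> (pos % 8));
  }
  return bit_offset + n_bits;  /* new stream length in bits */
}

int main(void)
{
  uint8_t buf[4] = {0};
  unsigned int len = 0;

  len = put_bits_sketch(0x5, 3, len, buf);  /* appends 101 */
  len = put_bits_sketch(0x3, 6, len, buf);  /* appends 000011 */
  printf("stream length: %u bits, first bytes: %02X %02X\n",
         len, (unsigned int)buf[0], (unsigned int)buf[1]);
  return 0;  /* prints: stream length: 9 bits, first bytes: A1 80 */
}
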
193
194
195
/**
196
 * @brief forms the codeword according to the Rice code
197
 *
198
 * @param value   value to be encoded (must be less than or equal to cmp_ima_max_spill(m))
199
 * @param m   Golomb parameter; only values of m that are a power of 2 are allowed,
200
 *      maximum allowed Golomb parameter is 0x80000000
201
 * @param log2_m  Rice parameter; equal to ilog_2(m), calculated outside the function
202
 *      for better performance
203
 * @param cw    address where the code word is stored
204
 *
205
 * @warning there is no check of the validity of the input parameters!
206
 * @returns the length of the formed code word in bits; the code word is invalid
207
 *  if the return value is greater than 32
208
 */
209
210
static uint32_t rice_encoder(uint32_t value, uint32_t m, uint32_t log2_m,
211
           uint32_t *cw)
212
110k
{
213
110k
  uint32_t const q = value >> log2_m;  /* quotient of value/m */
214
110k
  uint32_t const qc = (1U << q) - 1;   /* quotient code without ending zero */
215
216
110k
  uint32_t const r = value & (m-1);    /* remainder of value/m */
217
110k
  uint32_t const rl = log2_m + 1;      /* length of the remainder (+1 for the 0 in the quotient code) */
218
219
110k
  *cw = (qc << (rl & 0x1FU)) | r; /* put the quotient and remainder code together */
220
  /*
221
   * NOTE: If log2_m = 31 -> rl = 32, (q << rl) leads to an undefined
222
   * behavior. However, in this case, a valid code with a maximum of 32
223
   * bits can only be formed if q = 0 and qc = 0. To prevent undefined
224
   * behavior, the right shift operand is masked (& 0x1FU)
225
   */
226
227
110k
  return rl + q;  /* calculate the length of the code word */
228
110k
}
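
As a worked example, the stand-alone copy below (rice_encoder_sketch is a hypothetical name, not a library symbol) encodes value = 11 with m = 4 (log2_m = 2): the quotient 2 yields the unary prefix 110, the remainder 3 the binary suffix 11, so the code word is 0b11011 = 0x1B with a length of 5 bits:

#include <stdio.h>
#include <stdint.h>

/* illustrative copy of the Rice encoder above */
static uint32_t rice_encoder_sketch(uint32_t value, uint32_t m, uint32_t log2_m,
            uint32_t *cw)
{
  uint32_t const q = value >> log2_m;   /* quotient of value/m */
  uint32_t const qc = (1U << q) - 1;    /* unary quotient code without ending zero */
  uint32_t const r = value & (m - 1);   /* remainder of value/m */
  uint32_t const rl = log2_m + 1;       /* remainder length incl. the ending zero */

  *cw = (qc << (rl & 0x1FU)) | r;
  return rl + q;
}

int main(void)
{
  uint32_t cw;
  uint32_t const len = rice_encoder_sketch(11, 4, 2, &cw);

  printf("value 11, m 4 -> code word 0x%X, %u bits\n",
         (unsigned int)cw, (unsigned int)len);  /* prints 0x1B, 5 bits */
  return 0;
}
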
229
230
231
/**
232
 * @brief forms a codeword according to the Golomb code
233
 *
234
 * @param value   value to be encoded (must be less than or equal to cmp_ima_max_spill(m))
235
 * @param m   Golomb parameter (must be greater than 0)
236
 * @param log2_m  equal to ilog_2(m), calculated outside the function for better performance
237
 * @param cw    address where the code word is stored
238
 *
239
 * @warning there is no check of the validity of the input parameters!
240
 * @returns the length of the formed code word in bits; the code word is invalid
241
 *  if the return value is greater than 32
242
 */
243
244
static uint32_t golomb_encoder(uint32_t value, uint32_t m, uint32_t log2_m,
245
             uint32_t *cw)
246
287k
{
247
287k
  uint32_t len = log2_m + 1;  /* codeword length in group 0 */
248
287k
  uint32_t const cutoff = (0x2U << log2_m) - m;  /* members in group 0 */
249
250
287k
  if (value < cutoff) {  /* group 0 */
251
190k
    *cw = value;
252
190k
  } else {  /* other groups */
253
96.9k
    uint32_t const reg_mask = 0x1FU;  /* mask for the right shift operand to prevent undefined behavior */
254
96.9k
    uint32_t const g = (value-cutoff) / m;  /* group number of same cw length */
255
96.9k
    uint32_t const r = (value-cutoff) - g * m; /* member in the group */
256
96.9k
    uint32_t const gc = (1U << (g & reg_mask)) - 1; /* prepare the left side in unary */
257
96.9k
    uint32_t const b = cutoff << 1;         /* form the base codeword */
258
259
96.9k
    *cw = gc << ((len+1) & reg_mask);  /* composed codeword part 1 */
260
96.9k
    *cw += b + r;                      /* composed codeword part 2 */
261
96.9k
    len += 1 + g;                      /* length of the codeword */
262
96.9k
  }
263
287k
  return len;
264
287k
}
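
A worked example with a non-power-of-2 parameter (numbers derived from the code above, m = 5, log2_m = 2): cutoff = (0x2 << 2) - 5 = 3, so the values 0..2 form group 0 and are coded in 3 bits (value 2 -> 0b010). For value 7: g = (7-3)/5 = 0, r = 4 and the base code word b = 6, giving cw = 10 = 0b1010 with len = 4. For value 10: g = 1, r = 2, gc = 1, giving cw = (1 << 4) + 6 + 2 = 0b11000 with len = 5.
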
265
266
267
/**
268
 * @brief generate a code word without an outlier mechanism and put it in the
269
 *  bitstream
270
 *
271
 * @param value   value to encode in the bitstream
272
 * @param stream_len  length of the bitstream in bits
273
 * @param setup   pointer to the encoder setup
274
 *
275
 * @returns the bit length of the bitstream on success or an error code if it
276
 *  fails (which can be tested with cmp_is_error())
277
 */
278
279
static uint32_t encode_normal(uint32_t value, uint32_t stream_len,
280
            const struct encoder_setup *setup)
281
397k
{
282
397k
  uint32_t code_word, cw_len;
283
284
397k
  cw_len = setup->generate_cw_f(value, setup->encoder_par1,
285
397k
              setup->encoder_par2, &code_word);
286
287
397k
  return put_n_bits32(code_word, cw_len, stream_len, setup->bitstream_adr,
288
397k
          setup->max_stream_len);
289
397k
}
290
291
292
/**
293
 * @brief subtracts the model from the data, encodes the result and puts it into
294
 *  the bitstream; outliers are encoded with the zero escape symbol mechanism
295
 *
296
 * @param data    data to encode
297
 * @param model   model of the data (0 if not used)
298
 * @param stream_len  length of the bitstream in bits
299
 * @param setup   pointer to the encoder setup
300
 *
301
 * @returns the bit length of the bitstream on success or an error code if it
302
 *  fails (which can be tested with cmp_is_error())
303
 *
304
 * @note no check if the data or model are in the allowed range
305
 * @note no check if the setup->spillover_par is in the allowed range
306
 */
307
308
static uint32_t encode_value_zero(uint32_t data, uint32_t model, uint32_t stream_len,
309
          const struct encoder_setup *setup)
310
264k
{
311
264k
  data -= model; /* possible underflow is intended */
312
313
264k
  data = map_to_pos(data, setup->max_data_bits);
314
315
  /* For performance reasons, we check to see if there is an outlier
316
   * before adding one, rather than the other way around:
317
   * data++;
318
   * if (data < setup->spillover_par && data != 0)
319
   *  return ...
320
   */
321
264k
  if (data < (setup->spillover_par - 1)) { /* detect non-outlier */
322
138k
    data++; /* add 1 to every value so we can use 0 as the escape symbol */
323
138k
    return encode_normal(data, stream_len, setup);
324
138k
  }
325
326
125k
  data++; /* add 1 to every value so we can use 0 as the escape symbol */
327
328
  /* use zero as escape symbol */
329
125k
  stream_len = encode_normal(0, stream_len, setup);
330
125k
  if (cmp_is_error(stream_len))
331
1.05k
    return stream_len;
332
333
  /* put the data unencoded in the bitstream */
334
124k
  stream_len = put_n_bits32(data, setup->max_data_bits, stream_len,
335
124k
          setup->bitstream_adr, setup->max_stream_len);
336
124k
  return stream_len;
337
125k
}
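
For example, assuming spillover_par = 16 and max_data_bits = 16: a mapped value of 10 lies below spillover_par - 1, so 11 is passed to encode_normal(); a mapped value of 200 is an outlier, so the code word for 0 (the escape symbol) is emitted first and 201 is then written as a raw 16-bit field.
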
338
339
340
/**
341
 * @brief subtracts the model from the data, encodes the result and puts it into
342
 *  the bitstream; outliers are encoded with the multi escape symbol mechanism
343
 *
344
 * @param data    data to encode
345
 * @param model   model of the data (0 if not used)
346
 * @param stream_len  length of the bitstream in bits
347
 * @param setup   pointer to the encoder setup
348
 *
349
 * @returns the bit length of the bitstream on success or an error code if it
350
 *  fails (which can be tested with cmp_is_error())
351
 *
352
 * @note no check if the data or model are in the allowed range
353
 * @note no check if the setup->spillover_par is in the allowed range
354
 */
355
356
static uint32_t encode_value_multi(uint32_t data, uint32_t model, uint32_t stream_len,
357
           const struct encoder_setup *setup)
358
133k
{
359
133k
  uint32_t unencoded_data;
360
133k
  unsigned int unencoded_data_len;
361
133k
  uint32_t escape_sym, escape_sym_offset;
362
363
133k
  data -= model; /* possible underflow is intended */
364
365
133k
  data = map_to_pos(data, setup->max_data_bits);
366
367
133k
  if (data < setup->spillover_par) /* detect non-outlier */
368
71.2k
    return  encode_normal(data, stream_len, setup);
369
370
  /*
371
   * In this mode we put the difference between the data and the spillover
372
   * threshold value (unencoded_data) after an encoded escape symbol, which
373
   * indicates that the next codeword is unencoded.
374
   * We use different escape symbols depending on the number of bits needed
375
   * for the unencoded data:
376
   * 0, 1, 2 bits needed for unencoded data -> escape symbol is spillover_par + 0
377
   * 3, 4 bits needed for unencoded data -> escape symbol is spillover_par + 1
378
   * 5, 6 bits needed for unencoded data -> escape symbol is spillover_par + 2
379
   * and so on
380
   */
381
61.7k
  unencoded_data = data - setup->spillover_par;
382
383
61.7k
  if (!unencoded_data) /* catch __builtin_clz(0) because the result is undefined.*/
384
162
    escape_sym_offset = 0;
385
61.6k
  else
386
61.6k
    escape_sym_offset = (31U - (uint32_t)__builtin_clz(unencoded_data)) >> 1;
387
388
61.7k
  escape_sym = setup->spillover_par + escape_sym_offset;
389
61.7k
  unencoded_data_len = (escape_sym_offset + 1U) << 1;
390
391
  /* put the escape symbol in the bitstream */
392
61.7k
  stream_len = encode_normal(escape_sym, stream_len, setup);
393
61.7k
  if (cmp_is_error(stream_len))
394
6.07k
    return stream_len;
395
396
  /* put the unencoded data in the bitstream */
397
55.7k
  stream_len = put_n_bits32(unencoded_data, unencoded_data_len, stream_len,
398
55.7k
          setup->bitstream_adr, setup->max_stream_len);
399
55.7k
  return stream_len;
400
61.7k
}
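
The escape-symbol selection above can be reproduced with a small stand-alone sketch (escape_info_sketch is a hypothetical helper, not part of the library). For a few unencoded values it prints the offset added to spillover_par and the number of raw bits that follow the escape symbol:

#include <stdio.h>
#include <stdint.h>

/* mirror of the escape symbol offset / raw bit length computation above */
static void escape_info_sketch(uint32_t unencoded_data)
{
  uint32_t offset;
  unsigned int raw_bits;

  if (!unencoded_data)  /* __builtin_clz(0) is undefined */
    offset = 0;
  else
    offset = (31U - (uint32_t)__builtin_clz(unencoded_data)) >> 1;
  raw_bits = (unsigned int)((offset + 1U) << 1);

  printf("unencoded %4u -> escape symbol spillover_par + %u, %2u raw bits\n",
         (unsigned int)unencoded_data, (unsigned int)offset, raw_bits);
}

int main(void)
{
  uint32_t const samples[] = { 0, 3, 4, 15, 16, 1000 };
  size_t i;

  for (i = 0; i < sizeof(samples)/sizeof(samples[0]); i++)
    escape_info_sketch(samples[i]);  /* offsets 0, 0, 1, 1, 2, 4 */
  return 0;
}
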
401
402
403
/**
404
 * @brief encodes the data with the model and the given setup and puts it into
405
 *  the bitstream
406
 *
407
 * @param data    data to encode
408
 * @param model   model of the data (0 if not used)
409
 * @param stream_len  length of the bitstream in bits
410
 * @param setup   pointer to the encoder setup
411
 *
412
 * @returns the bit length of the bitstream on success or an error code if it
413
 *  fails (which can be tested with cmp_is_error())
414
 */
415
416
static uint32_t encode_value(uint32_t data, uint32_t model, uint32_t stream_len,
417
           const struct encoder_setup *setup)
418
397k
{
419
397k
  uint32_t const mask = ~(0xFFFFFFFFU >> (32-setup->max_data_bits));
420
421
  /* lossy rounding of the data if lossy_par > 0 */
422
397k
  data = round_fwd(data, setup->lossy_par);
423
397k
  model = round_fwd(model, setup->lossy_par);
424
425
397k
  RETURN_ERROR_IF(data & mask || model & mask, DATA_VALUE_TOO_LARGE, "");
426
427
397k
  return setup->encode_method_f(data, model, stream_len, setup);
428
397k
}
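
For example, with max_data_bits = 16 the mask is 0xFFFF0000, so any data or model value above 0xFFFF is rejected with DATA_VALUE_TOO_LARGE before encoding.
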
429
430
431
/**
432
 * @brief calculate the maximum length of the bitstream in bits
433
 * @note we round down to the next 4-byte aligned address because we access the
434
 *  cmp_buffer in uint32_t words
435
 *
436
 * @param stream_size size of the bitstream in bytes
437
 *
438
 * @returns buffer size in bits
439
 */
440
441
static uint32_t cmp_stream_size_to_bits(uint32_t stream_size)
442
182k
{
443
182k
  return (stream_size & ~0x3U) * 8;
444
182k
}
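
For example, a stream_size of 10 bytes yields (10 & ~0x3) * 8 = 64 bits, while 12 bytes yields 96 bits.
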
445
446
447
/**
448
 * @brief configure an encoder setup structure for encoding a value
449
 *
450
 * @param setup   pointer to the encoder setup
451
 * @param cmp_par compression parameter
452
 * @param spillover spillover (outlier) threshold parameter
453
 * @param lossy_par lossy compression parameter
454
 * @param max_data_bits how many bits are needed to represent the highest possible value
455
 * @param cfg   pointer to the compression configuration structure
456
 *
457
 * @warning input parameters are not checked for validity
458
 */
459
460
static void configure_encoder_setup(struct encoder_setup *setup,
461
            uint32_t cmp_par, uint32_t spillover,
462
            uint32_t lossy_par, uint32_t max_data_bits,
463
            const struct cmp_cfg *cfg)
464
130k
{
465
130k
  memset(setup, 0, sizeof(struct encoder_setup));
466
467
130k
  setup->encoder_par1 = cmp_par;
468
130k
  setup->max_data_bits = max_data_bits;
469
130k
  setup->lossy_par = lossy_par;
470
130k
  setup->bitstream_adr = cfg->dst;
471
130k
  setup->max_stream_len = cmp_stream_size_to_bits(cfg->stream_size);
472
130k
  setup->encoder_par2 = ilog_2(cmp_par);
473
130k
  setup->spillover_par = spillover;
474
475
  /* for encoder_par1 which is a power of two we can use the faster rice_encoder */
476
130k
  if (is_a_pow_of_2(setup->encoder_par1))
477
32.8k
    setup->generate_cw_f = &rice_encoder;
478
97.5k
  else
479
97.5k
    setup->generate_cw_f = &golomb_encoder;
480
481
  /* CMP_MODE_RAW is already handled before */
482
130k
  if (cfg->cmp_mode == CMP_MODE_MODEL_ZERO ||
483
130k
      cfg->cmp_mode == CMP_MODE_DIFF_ZERO)
484
81.4k
    setup->encode_method_f = &encode_value_zero;
485
48.9k
  else
486
48.9k
    setup->encode_method_f = &encode_value_multi;
487
130k
}
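
For example, cmp_par = 8 (a power of two) selects rice_encoder, while cmp_par = 5 selects golomb_encoder; CMP_MODE_MODEL_ZERO and CMP_MODE_DIFF_ZERO select encode_value_zero, and the remaining non-raw modes select encode_value_multi.
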
488
489
490
/**
491
 * @brief compress imagette data
492
 *
493
 * @param cfg   pointer to the compression configuration structure
494
 * @param stream_len  already used length of the bitstream in bits
495
 *
496
 * @returns the bit length of the bitstream on success or an error code if it
497
 *  fails (which can be tested with cmp_is_error())
498
 */
499
500
static uint32_t compress_imagette(const struct cmp_cfg *cfg, uint32_t stream_len)
501
18.1k
{
502
18.1k
  size_t i;
503
18.1k
  struct encoder_setup setup;
504
18.1k
  uint32_t max_data_bits;
505
506
18.1k
  const uint16_t *data_buf = cfg->src;
507
18.1k
  const uint16_t *model_buf = cfg->model_buf;
508
18.1k
  uint16_t model = 0;
509
18.1k
  const uint16_t *next_model_p = data_buf;
510
18.1k
  uint16_t *up_model_buf = NULL;
511
512
18.1k
  if (model_mode_is_used(cfg->cmp_mode)) {
513
14.8k
    model = get_unaligned(&model_buf[0]);
514
14.8k
    next_model_p = &model_buf[1];
515
14.8k
    up_model_buf = cfg->updated_model_buf;
516
14.8k
  }
517
518
18.1k
  if (cfg->data_type == DATA_TYPE_F_CAM_IMAGETTE ||
519
18.1k
      cfg->data_type == DATA_TYPE_F_CAM_IMAGETTE_ADAPTIVE) {
520
785
    max_data_bits = MAX_USED_BITS.fc_imagette;
521
17.4k
  } else if (cfg->data_type == DATA_TYPE_SAT_IMAGETTE ||
522
17.4k
       cfg->data_type == DATA_TYPE_SAT_IMAGETTE_ADAPTIVE) {
523
16.2k
    max_data_bits = MAX_USED_BITS.saturated_imagette;
524
16.2k
  } else { /* DATA_TYPE_IMAGETTE, DATA_TYPE_IMAGETTE_ADAPTIVE */
525
1.17k
    max_data_bits = MAX_USED_BITS.nc_imagette;
526
1.17k
  }
527
528
18.1k
  configure_encoder_setup(&setup, cfg->cmp_par_imagette,
529
18.1k
        cfg->spill_imagette, cfg->round, max_data_bits, cfg);
530
531
83.5k
  for (i = 0;; i++) {
532
83.5k
    stream_len = encode_value(get_unaligned(&data_buf[i]),
533
83.5k
            model, stream_len, &setup);
534
83.5k
    if (cmp_is_error(stream_len))
535
10.5k
      break;
536
537
72.9k
    if (up_model_buf) {
538
31.2k
      uint16_t data = get_unaligned(&data_buf[i]);
539
31.2k
      up_model_buf[i] = cmp_up_model(data, model, cfg->model_value,
540
31.2k
                   setup.lossy_par);
541
31.2k
    }
542
72.9k
    if (i >= cfg->samples-1)
543
7.60k
      break;
544
545
65.3k
    model = get_unaligned(&next_model_p[i]);
546
65.3k
  }
547
18.1k
  return stream_len;
548
18.1k
}
549
550
551
/**
552
 * @brief compress short normal light flux (S_FX) data
553
 *
554
 * @param cfg   pointer to the compression configuration structure
555
 * @param stream_len  already used length of the bitstream in bits
556
 *
557
 * @returns the bit length of the bitstream on success or an error code if it
558
 *  fails (which can be tested with cmp_is_error())
559
 */
560
561
static uint32_t compress_s_fx(const struct cmp_cfg *cfg, uint32_t stream_len)
562
1.72k
{
563
1.72k
  size_t i;
564
565
1.72k
  const struct s_fx *data_buf = cfg->src;
566
1.72k
  const struct s_fx *model_buf = cfg->model_buf;
567
1.72k
  struct s_fx *up_model_buf = NULL;
568
1.72k
  const struct s_fx *next_model_p;
569
1.72k
  struct s_fx model;
570
1.72k
  struct encoder_setup setup_exp_flag, setup_fx;
571
572
1.72k
  if (model_mode_is_used(cfg->cmp_mode)) {
573
554
    model = model_buf[0];
574
554
    next_model_p = &model_buf[1];
575
554
    up_model_buf = cfg->updated_model_buf;
576
1.17k
  } else {
577
1.17k
    memset(&model, 0, sizeof(model));
578
1.17k
    next_model_p = data_buf;
579
1.17k
  }
580
581
1.72k
  configure_encoder_setup(&setup_exp_flag, cfg->cmp_par_exp_flags, cfg->spill_exp_flags,
582
1.72k
        cfg->round, MAX_USED_BITS.s_exp_flags, cfg);
583
1.72k
  configure_encoder_setup(&setup_fx, cfg->cmp_par_fx, cfg->spill_fx,
584
1.72k
        cfg->round, MAX_USED_BITS.s_fx, cfg);
585
586
8.85k
  for (i = 0;; i++) {
587
8.85k
    stream_len = encode_value(data_buf[i].exp_flags, model.exp_flags,
588
8.85k
            stream_len, &setup_exp_flag);
589
8.85k
    if (cmp_is_error(stream_len))
590
172
      break;
591
8.68k
    stream_len = encode_value(data_buf[i].fx, model.fx, stream_len,
592
8.68k
            &setup_fx);
593
8.68k
    if (cmp_is_error(stream_len))
594
330
      break;
595
596
8.35k
    if (up_model_buf) {
597
2.01k
      up_model_buf[i].exp_flags = cmp_up_model(data_buf[i].exp_flags, model.exp_flags,
598
2.01k
                 cfg->model_value, setup_exp_flag.lossy_par);
599
2.01k
      up_model_buf[i].fx = cmp_up_model(data_buf[i].fx, model.fx,
600
2.01k
                cfg->model_value, setup_fx.lossy_par);
601
2.01k
    }
602
603
8.35k
    if (i >= cfg->samples-1)
604
1.22k
      break;
605
606
7.12k
    model = next_model_p[i];
607
7.12k
  }
608
1.72k
  return stream_len;
609
1.72k
}
610
611
612
/**
613
 * @brief compress S_FX_EFX data
614
 *
615
 * @param cfg   pointer to the compression configuration structure
616
 * @param stream_len  already used length of the bitstream in bits
617
 *
618
 * @returns the bit length of the bitstream on success or an error code if it
619
 *  fails (which can be tested with cmp_is_error())
620
 */
621
622
static uint32_t compress_s_fx_efx(const struct cmp_cfg *cfg, uint32_t stream_len)
623
2.12k
{
624
2.12k
  size_t i;
625
626
2.12k
  const struct s_fx_efx *data_buf = cfg->src;
627
2.12k
  const struct s_fx_efx *model_buf = cfg->model_buf;
628
2.12k
  struct s_fx_efx *up_model_buf = NULL;
629
2.12k
  const struct s_fx_efx *next_model_p;
630
2.12k
  struct s_fx_efx model;
631
2.12k
  struct encoder_setup setup_exp_flag, setup_fx, setup_efx;
632
633
2.12k
  if (model_mode_is_used(cfg->cmp_mode)) {
634
940
    model = model_buf[0];
635
940
    next_model_p = &model_buf[1];
636
940
    up_model_buf = cfg->updated_model_buf;
637
1.18k
  } else {
638
1.18k
    memset(&model, 0, sizeof(model));
639
1.18k
    next_model_p = data_buf;
640
1.18k
  }
641
642
2.12k
  configure_encoder_setup(&setup_exp_flag, cfg->cmp_par_exp_flags, cfg->spill_exp_flags,
643
2.12k
        cfg->round, MAX_USED_BITS.s_exp_flags, cfg);
644
2.12k
  configure_encoder_setup(&setup_fx, cfg->cmp_par_fx, cfg->spill_fx,
645
2.12k
        cfg->round, MAX_USED_BITS.s_fx, cfg);
646
2.12k
  configure_encoder_setup(&setup_efx, cfg->cmp_par_efx, cfg->spill_efx,
647
2.12k
        cfg->round, MAX_USED_BITS.s_efx, cfg);
648
649
7.98k
  for (i = 0;; i++) {
650
7.98k
    stream_len = encode_value(data_buf[i].exp_flags, model.exp_flags,
651
7.98k
            stream_len, &setup_exp_flag);
652
7.98k
    if (cmp_is_error(stream_len))
653
113
      break;
654
7.86k
    stream_len = encode_value(data_buf[i].fx, model.fx, stream_len,
655
7.86k
            &setup_fx);
656
7.86k
    if (cmp_is_error(stream_len))
657
232
      break;
658
7.63k
    stream_len = encode_value(data_buf[i].efx, model.efx,
659
7.63k
            stream_len, &setup_efx);
660
7.63k
    if (cmp_is_error(stream_len))
661
210
      return stream_len;
662
663
7.42k
    if (up_model_buf) {
664
2.54k
      up_model_buf[i].exp_flags = cmp_up_model(data_buf[i].exp_flags, model.exp_flags,
665
2.54k
        cfg->model_value, setup_exp_flag.lossy_par);
666
2.54k
      up_model_buf[i].fx = cmp_up_model(data_buf[i].fx, model.fx,
667
2.54k
        cfg->model_value, setup_fx.lossy_par);
668
2.54k
      up_model_buf[i].efx = cmp_up_model(data_buf[i].efx, model.efx,
669
2.54k
        cfg->model_value, setup_efx.lossy_par);
670
2.54k
    }
671
672
7.42k
    if (i >= cfg->samples-1)
673
1.56k
      break;
674
675
5.86k
    model = next_model_p[i];
676
5.86k
  }
677
1.91k
  return stream_len;
678
2.12k
}
679
680
681
/**
682
 * @brief compress S_FX_NCOB data
683
 *
684
 * @param cfg   pointer to the compression configuration structure
685
 * @param stream_len  already used length of the bitstream in bits
686
 *
687
 * @returns the bit length of the bitstream on success or an error code if it
688
 *  fails (which can be tested with cmp_is_error())
689
 */
690
691
static uint32_t compress_s_fx_ncob(const struct cmp_cfg *cfg, uint32_t stream_len)
692
1.37k
{
693
1.37k
  size_t i;
694
695
1.37k
  const struct s_fx_ncob *data_buf = cfg->src;
696
1.37k
  const struct s_fx_ncob *model_buf = cfg->model_buf;
697
1.37k
  struct s_fx_ncob *up_model_buf = NULL;
698
1.37k
  const struct s_fx_ncob *next_model_p;
699
1.37k
  struct s_fx_ncob model;
700
1.37k
  struct encoder_setup setup_exp_flag, setup_fx, setup_ncob;
701
702
1.37k
  if (model_mode_is_used(cfg->cmp_mode)) {
703
583
    model = model_buf[0];
704
583
    next_model_p = &model_buf[1];
705
583
    up_model_buf = cfg->updated_model_buf;
706
795
  } else {
707
795
    memset(&model, 0, sizeof(model));
708
795
    next_model_p = data_buf;
709
795
  }
710
711
1.37k
  configure_encoder_setup(&setup_exp_flag, cfg->cmp_par_exp_flags, cfg->spill_exp_flags,
712
1.37k
        cfg->round, MAX_USED_BITS.s_exp_flags, cfg);
713
1.37k
  configure_encoder_setup(&setup_fx, cfg->cmp_par_fx, cfg->spill_fx,
714
1.37k
        cfg->round, MAX_USED_BITS.s_fx, cfg);
715
1.37k
  configure_encoder_setup(&setup_ncob, cfg->cmp_par_ncob, cfg->spill_ncob,
716
1.37k
        cfg->round, MAX_USED_BITS.s_ncob, cfg);
717
718
7.37k
  for (i = 0;; i++) {
719
7.37k
    stream_len = encode_value(data_buf[i].exp_flags, model.exp_flags,
720
7.37k
            stream_len, &setup_exp_flag);
721
7.37k
    if (cmp_is_error(stream_len))
722
65
      break;
723
7.31k
    stream_len = encode_value(data_buf[i].fx, model.fx, stream_len,
724
7.31k
            &setup_fx);
725
7.31k
    if (cmp_is_error(stream_len))
726
107
      break;
727
7.20k
    stream_len = encode_value(data_buf[i].ncob_x, model.ncob_x,
728
7.20k
            stream_len, &setup_ncob);
729
7.20k
    if (cmp_is_error(stream_len))
730
157
      break;
731
7.04k
    stream_len = encode_value(data_buf[i].ncob_y, model.ncob_y,
732
7.04k
            stream_len, &setup_ncob);
733
7.04k
    if (cmp_is_error(stream_len))
734
129
      break;
735
736
6.92k
    if (up_model_buf) {
737
2.21k
      up_model_buf[i].exp_flags = cmp_up_model(data_buf[i].exp_flags, model.exp_flags,
738
2.21k
        cfg->model_value, setup_exp_flag.lossy_par);
739
2.21k
      up_model_buf[i].fx = cmp_up_model(data_buf[i].fx, model.fx,
740
2.21k
        cfg->model_value, setup_fx.lossy_par);
741
2.21k
      up_model_buf[i].ncob_x = cmp_up_model(data_buf[i].ncob_x, model.ncob_x,
742
2.21k
        cfg->model_value, setup_ncob.lossy_par);
743
2.21k
      up_model_buf[i].ncob_y = cmp_up_model(data_buf[i].ncob_y, model.ncob_y,
744
2.21k
        cfg->model_value, setup_ncob.lossy_par);
745
2.21k
    }
746
747
6.92k
    if (i >= cfg->samples-1)
748
920
      break;
749
750
6.00k
    model = next_model_p[i];
751
6.00k
  }
752
1.37k
  return stream_len;
753
1.37k
}
754
755
756
/**
757
 * @brief compress S_FX_EFX_NCOB_ECOB data
758
 *
759
 * @param cfg   pointer to the compression configuration structure
760
 * @param stream_len  already used length of the bitstream in bits
761
 *
762
 * @returns the bit length of the bitstream on success or an error code if it
763
 *  fails (which can be tested with cmp_is_error())
764
 */
765
766
static uint32_t compress_s_fx_efx_ncob_ecob(const struct cmp_cfg *cfg, uint32_t stream_len)
767
2.29k
{
768
2.29k
  size_t i;
769
770
2.29k
  const struct s_fx_efx_ncob_ecob *data_buf = cfg->src;
771
2.29k
  const struct s_fx_efx_ncob_ecob *model_buf = cfg->model_buf;
772
2.29k
  struct s_fx_efx_ncob_ecob *up_model_buf = NULL;
773
2.29k
  const struct s_fx_efx_ncob_ecob *next_model_p;
774
2.29k
  struct s_fx_efx_ncob_ecob model;
775
2.29k
  struct encoder_setup setup_exp_flag, setup_fx, setup_ncob, setup_efx,
776
2.29k
            setup_ecob;
777
778
2.29k
  if (model_mode_is_used(cfg->cmp_mode)) {
779
885
    model = model_buf[0];
780
885
    next_model_p = &model_buf[1];
781
885
    up_model_buf = cfg->updated_model_buf;
782
1.41k
  } else {
783
1.41k
    memset(&model, 0, sizeof(model));
784
1.41k
    next_model_p = data_buf;
785
1.41k
  }
786
787
2.29k
  configure_encoder_setup(&setup_exp_flag, cfg->cmp_par_exp_flags, cfg->spill_exp_flags,
788
2.29k
        cfg->round, MAX_USED_BITS.s_exp_flags, cfg);
789
2.29k
  configure_encoder_setup(&setup_fx, cfg->cmp_par_fx, cfg->spill_fx,
790
2.29k
        cfg->round, MAX_USED_BITS.s_fx, cfg);
791
2.29k
  configure_encoder_setup(&setup_ncob, cfg->cmp_par_ncob, cfg->spill_ncob,
792
2.29k
        cfg->round, MAX_USED_BITS.s_ncob, cfg);
793
2.29k
  configure_encoder_setup(&setup_efx, cfg->cmp_par_efx, cfg->spill_efx,
794
2.29k
        cfg->round, MAX_USED_BITS.s_efx, cfg);
795
2.29k
  configure_encoder_setup(&setup_ecob, cfg->cmp_par_ecob, cfg->spill_ecob,
796
2.29k
        cfg->round, MAX_USED_BITS.s_ecob, cfg);
797
798
4.74k
  for (i = 0;; i++) {
799
4.74k
    stream_len = encode_value(data_buf[i].exp_flags, model.exp_flags,
800
4.74k
            stream_len, &setup_exp_flag);
801
4.74k
    if (cmp_is_error(stream_len))
802
67
      break;
803
4.67k
    stream_len = encode_value(data_buf[i].fx, model.fx, stream_len,
804
4.67k
            &setup_fx);
805
4.67k
    if (cmp_is_error(stream_len))
806
106
      break;
807
4.57k
    stream_len = encode_value(data_buf[i].ncob_x, model.ncob_x,
808
4.57k
            stream_len, &setup_ncob);
809
4.57k
    if (cmp_is_error(stream_len))
810
149
      break;
811
4.42k
    stream_len = encode_value(data_buf[i].ncob_y, model.ncob_y,
812
4.42k
            stream_len, &setup_ncob);
813
4.42k
    if (cmp_is_error(stream_len))
814
127
      break;
815
4.29k
    stream_len = encode_value(data_buf[i].efx, model.efx,
816
4.29k
            stream_len, &setup_efx);
817
4.29k
    if (cmp_is_error(stream_len))
818
246
      break;
819
4.05k
    stream_len = encode_value(data_buf[i].ecob_x, model.ecob_x,
820
4.05k
            stream_len, &setup_ecob);
821
4.05k
    if (cmp_is_error(stream_len))
822
132
      break;
823
3.91k
    stream_len = encode_value(data_buf[i].ecob_y, model.ecob_y,
824
3.91k
            stream_len, &setup_ecob);
825
3.91k
    if (cmp_is_error(stream_len))
826
123
      break;
827
828
3.79k
    if (up_model_buf) {
829
1.42k
      up_model_buf[i].exp_flags = cmp_up_model(data_buf[i].exp_flags, model.exp_flags,
830
1.42k
        cfg->model_value, setup_exp_flag.lossy_par);
831
1.42k
      up_model_buf[i].fx = cmp_up_model(data_buf[i].fx, model.fx,
832
1.42k
        cfg->model_value, setup_fx.lossy_par);
833
1.42k
      up_model_buf[i].ncob_x = cmp_up_model(data_buf[i].ncob_x, model.ncob_x,
834
1.42k
        cfg->model_value, setup_ncob.lossy_par);
835
1.42k
      up_model_buf[i].ncob_y = cmp_up_model(data_buf[i].ncob_y, model.ncob_y,
836
1.42k
        cfg->model_value, setup_ncob.lossy_par);
837
1.42k
      up_model_buf[i].efx = cmp_up_model(data_buf[i].efx, model.efx,
838
1.42k
        cfg->model_value, setup_efx.lossy_par);
839
1.42k
      up_model_buf[i].ecob_x = cmp_up_model(data_buf[i].ecob_x, model.ecob_x,
840
1.42k
        cfg->model_value, setup_ecob.lossy_par);
841
1.42k
      up_model_buf[i].ecob_y = cmp_up_model(data_buf[i].ecob_y, model.ecob_y,
842
1.42k
        cfg->model_value, setup_ecob.lossy_par);
843
1.42k
    }
844
845
3.79k
    if (i >= cfg->samples-1)
846
1.34k
      break;
847
848
2.44k
    model = next_model_p[i];
849
2.44k
  }
850
2.29k
  return stream_len;
851
2.29k
}
852
853
854
/**
855
 * @brief compress L_FX data
856
 *
857
 * @param cfg   pointer to the compression configuration structure
858
 * @param stream_len  already used length of the bitstream in bits
859
 *
860
 * @returns the bit length of the bitstream on success or an error code if it
861
 *  fails (which can be tested with cmp_is_error())
862
 */
863
864
static uint32_t compress_l_fx(const struct cmp_cfg *cfg, uint32_t stream_len)
865
7.69k
{
866
7.69k
  size_t i;
867
868
7.69k
  const struct l_fx *data_buf = cfg->src;
869
7.69k
  const struct l_fx *model_buf = cfg->model_buf;
870
7.69k
  struct l_fx *up_model_buf = NULL;
871
7.69k
  const struct l_fx *next_model_p;
872
7.69k
  struct l_fx model;
873
7.69k
  struct encoder_setup setup_exp_flag, setup_fx, setup_fx_var;
874
875
7.69k
  if (model_mode_is_used(cfg->cmp_mode)) {
876
5.61k
    model = model_buf[0];
877
5.61k
    next_model_p = &model_buf[1];
878
5.61k
    up_model_buf = cfg->updated_model_buf;
879
5.61k
  } else {
880
2.08k
    memset(&model, 0, sizeof(model));
881
2.08k
    next_model_p = data_buf;
882
2.08k
  }
883
884
7.69k
  configure_encoder_setup(&setup_exp_flag, cfg->cmp_par_exp_flags, cfg->spill_exp_flags,
885
7.69k
        cfg->round, MAX_USED_BITS.l_exp_flags, cfg);
886
7.69k
  configure_encoder_setup(&setup_fx, cfg->cmp_par_fx, cfg->spill_fx,
887
7.69k
        cfg->round, MAX_USED_BITS.l_fx, cfg);
888
7.69k
  configure_encoder_setup(&setup_fx_var, cfg->cmp_par_fx_cob_variance, cfg->spill_fx_cob_variance,
889
7.69k
        cfg->round, MAX_USED_BITS.l_fx_cob_variance, cfg);
890
891
10.5k
  for (i = 0;; i++) {
892
10.5k
    stream_len = encode_value(data_buf[i].exp_flags, model.exp_flags,
893
10.5k
            stream_len, &setup_exp_flag);
894
10.5k
    if (cmp_is_error(stream_len))
895
71
      break;
896
10.4k
    stream_len = encode_value(data_buf[i].fx, model.fx, stream_len,
897
10.4k
            &setup_fx);
898
10.4k
    if (cmp_is_error(stream_len))
899
343
      break;
900
10.1k
    stream_len = encode_value(data_buf[i].fx_variance, model.fx_variance,
901
10.1k
            stream_len, &setup_fx_var);
902
10.1k
    if (cmp_is_error(stream_len))
903
635
      break;
904
905
9.48k
    if (up_model_buf) {
906
5.50k
      up_model_buf[i].exp_flags = cmp_up_model32(data_buf[i].exp_flags, model.exp_flags,
907
5.50k
        cfg->model_value, setup_exp_flag.lossy_par);
908
5.50k
      up_model_buf[i].fx = cmp_up_model(data_buf[i].fx, model.fx,
909
5.50k
        cfg->model_value, setup_fx.lossy_par);
910
5.50k
      up_model_buf[i].fx_variance = cmp_up_model(data_buf[i].fx_variance, model.fx_variance,
911
5.50k
        cfg->model_value, setup_fx_var.lossy_par);
912
5.50k
    }
913
914
9.48k
    if (i >= cfg->samples-1)
915
6.64k
      break;
916
917
2.83k
    model = next_model_p[i];
918
2.83k
  }
919
7.69k
  return stream_len;
920
7.69k
}
921
922
923
/**
924
 * @brief compress L_FX_EFX data
925
 *
926
 * @param cfg   pointer to the compression configuration structure
927
 * @param stream_len  already used length of the bitstream in bits
928
 *
929
 * @returns the bit length of the bitstream on success or an error code if it
930
 *  fails (which can be tested with cmp_is_error())
931
 */
932
933
static uint32_t compress_l_fx_efx(const struct cmp_cfg *cfg, uint32_t stream_len)
934
1.28k
{
935
1.28k
  size_t i;
936
937
1.28k
  const struct l_fx_efx *data_buf = cfg->src;
938
1.28k
  const struct l_fx_efx *model_buf = cfg->model_buf;
939
1.28k
  struct l_fx_efx *up_model_buf = NULL;
940
1.28k
  const struct l_fx_efx *next_model_p;
941
1.28k
  struct l_fx_efx model;
942
1.28k
  struct encoder_setup setup_exp_flag, setup_fx, setup_efx, setup_fx_var;
943
944
1.28k
  if (model_mode_is_used(cfg->cmp_mode)) {
945
856
    model = model_buf[0];
946
856
    next_model_p = &model_buf[1];
947
856
    up_model_buf = cfg->updated_model_buf;
948
856
  } else {
949
431
    memset(&model, 0, sizeof(model));
950
431
    next_model_p = data_buf;
951
431
  }
952
953
1.28k
  configure_encoder_setup(&setup_exp_flag, cfg->cmp_par_exp_flags, cfg->spill_exp_flags,
954
1.28k
        cfg->round, MAX_USED_BITS.l_exp_flags, cfg);
955
1.28k
  configure_encoder_setup(&setup_fx, cfg->cmp_par_fx, cfg->spill_fx,
956
1.28k
        cfg->round, MAX_USED_BITS.l_fx, cfg);
957
1.28k
  configure_encoder_setup(&setup_efx, cfg->cmp_par_efx, cfg->spill_efx,
958
1.28k
        cfg->round, MAX_USED_BITS.l_efx, cfg);
959
1.28k
  configure_encoder_setup(&setup_fx_var, cfg->cmp_par_fx_cob_variance, cfg->spill_fx_cob_variance,
960
1.28k
        cfg->round, MAX_USED_BITS.l_fx_cob_variance, cfg);
961
962
4.36k
  for (i = 0;; i++) {
963
4.36k
    stream_len = encode_value(data_buf[i].exp_flags, model.exp_flags,
964
4.36k
            stream_len, &setup_exp_flag);
965
4.36k
    if (cmp_is_error(stream_len))
966
36
      break;
967
4.33k
    stream_len = encode_value(data_buf[i].fx, model.fx, stream_len,
968
4.33k
            &setup_fx);
969
4.33k
    if (cmp_is_error(stream_len))
970
96
      break;
971
4.23k
    stream_len = encode_value(data_buf[i].efx, model.efx,
972
4.23k
            stream_len, &setup_efx);
973
4.23k
    if (cmp_is_error(stream_len))
974
115
      break;
975
4.12k
    stream_len = encode_value(data_buf[i].fx_variance, model.fx_variance,
976
4.12k
            stream_len, &setup_fx_var);
977
4.12k
    if (cmp_is_error(stream_len))
978
122
      break;
979
980
4.00k
    if (up_model_buf) {
981
1.80k
      up_model_buf[i].exp_flags = cmp_up_model32(data_buf[i].exp_flags, model.exp_flags,
982
1.80k
        cfg->model_value, setup_exp_flag.lossy_par);
983
1.80k
      up_model_buf[i].fx = cmp_up_model(data_buf[i].fx, model.fx,
984
1.80k
        cfg->model_value, setup_fx.lossy_par);
985
1.80k
      up_model_buf[i].efx = cmp_up_model(data_buf[i].efx, model.efx,
986
1.80k
        cfg->model_value, setup_efx.lossy_par);
987
1.80k
      up_model_buf[i].fx_variance = cmp_up_model(data_buf[i].fx_variance, model.fx_variance,
988
1.80k
        cfg->model_value, setup_fx_var.lossy_par);
989
1.80k
    }
990
991
4.00k
    if (i >= cfg->samples-1)
992
918
      break;
993
994
3.08k
    model = next_model_p[i];
995
3.08k
  }
996
1.28k
  return stream_len;
997
1.28k
}
998
999
1000
/**
1001
 * @brief compress L_FX_NCOB data
1002
 *
1003
 * @param cfg   pointer to the compression configuration structure
1004
 * @param stream_len  already used length of the bitstream in bits
1005
 *
1006
 * @returns the bit length of the bitstream on success or an error code if it
1007
 *  fails (which can be tested with cmp_is_error())
1008
 */
1009
1010
static uint32_t compress_l_fx_ncob(const struct cmp_cfg *cfg, uint32_t stream_len)
1011
1.00k
{
1012
1.00k
  size_t i;
1013
1014
1.00k
  const struct l_fx_ncob *data_buf = cfg->src;
1015
1.00k
  const struct l_fx_ncob *model_buf = cfg->model_buf;
1016
1.00k
  struct l_fx_ncob *up_model_buf = NULL;
1017
1.00k
  const struct l_fx_ncob *next_model_p;
1018
1.00k
  struct l_fx_ncob model;
1019
1.00k
  struct encoder_setup setup_exp_flag, setup_fx, setup_ncob,
1020
1.00k
            setup_fx_var, setup_cob_var;
1021
1022
1.00k
  if (model_mode_is_used(cfg->cmp_mode)) {
1023
169
    model = model_buf[0];
1024
169
    next_model_p = &model_buf[1];
1025
169
    up_model_buf = cfg->updated_model_buf;
1026
832
  } else {
1027
832
    memset(&model, 0, sizeof(model));
1028
832
    next_model_p = data_buf;
1029
832
  }
1030
1031
1.00k
  configure_encoder_setup(&setup_exp_flag, cfg->cmp_par_exp_flags, cfg->spill_exp_flags,
1032
1.00k
        cfg->round, MAX_USED_BITS.l_exp_flags, cfg);
1033
1.00k
  configure_encoder_setup(&setup_fx, cfg->cmp_par_fx, cfg->spill_fx,
1034
1.00k
        cfg->round, MAX_USED_BITS.l_fx, cfg);
1035
1.00k
  configure_encoder_setup(&setup_ncob, cfg->cmp_par_ncob, cfg->spill_ncob,
1036
1.00k
        cfg->round, MAX_USED_BITS.l_ncob, cfg);
1037
  /* we use the cmp_par_fx_cob_variance parameter for fx and cob variance data */
1038
1.00k
  configure_encoder_setup(&setup_fx_var, cfg->cmp_par_fx_cob_variance, cfg->spill_fx_cob_variance,
1039
1.00k
        cfg->round, MAX_USED_BITS.l_fx_cob_variance, cfg);
1040
1.00k
  configure_encoder_setup(&setup_cob_var, cfg->cmp_par_fx_cob_variance, cfg->spill_fx_cob_variance,
1041
1.00k
        cfg->round, MAX_USED_BITS.l_fx_cob_variance, cfg);
1042
1043
5.64k
  for (i = 0;; i++) {
1044
5.64k
    stream_len = encode_value(data_buf[i].exp_flags, model.exp_flags,
1045
5.64k
            stream_len, &setup_exp_flag);
1046
5.64k
    if (cmp_is_error(stream_len))
1047
36
      break;
1048
5.61k
    stream_len = encode_value(data_buf[i].fx, model.fx, stream_len,
1049
5.61k
            &setup_fx);
1050
5.61k
    if (cmp_is_error(stream_len))
1051
35
      break;
1052
5.57k
    stream_len = encode_value(data_buf[i].ncob_x, model.ncob_x,
1053
5.57k
            stream_len, &setup_ncob);
1054
5.57k
    if (cmp_is_error(stream_len))
1055
41
      break;
1056
5.53k
    stream_len = encode_value(data_buf[i].ncob_y, model.ncob_y,
1057
5.53k
            stream_len, &setup_ncob);
1058
5.53k
    if (cmp_is_error(stream_len))
1059
41
      break;
1060
5.49k
    stream_len = encode_value(data_buf[i].fx_variance, model.fx_variance,
1061
5.49k
            stream_len, &setup_fx_var);
1062
5.49k
    if (cmp_is_error(stream_len))
1063
38
      break;
1064
5.45k
    stream_len = encode_value(data_buf[i].cob_x_variance, model.cob_x_variance,
1065
5.45k
            stream_len, &setup_cob_var);
1066
5.45k
    if (cmp_is_error(stream_len))
1067
36
      break;
1068
5.41k
    stream_len = encode_value(data_buf[i].cob_y_variance, model.cob_y_variance,
1069
5.41k
            stream_len, &setup_cob_var);
1070
5.41k
    if (cmp_is_error(stream_len))
1071
38
      break;
1072
1073
5.38k
    if (up_model_buf) {
1074
424
      up_model_buf[i].exp_flags = cmp_up_model32(data_buf[i].exp_flags, model.exp_flags,
1075
424
        cfg->model_value, setup_exp_flag.lossy_par);
1076
424
      up_model_buf[i].fx = cmp_up_model(data_buf[i].fx, model.fx,
1077
424
        cfg->model_value, setup_fx.lossy_par);
1078
424
      up_model_buf[i].ncob_x = cmp_up_model(data_buf[i].ncob_x, model.ncob_x,
1079
424
        cfg->model_value, setup_ncob.lossy_par);
1080
424
      up_model_buf[i].ncob_y = cmp_up_model(data_buf[i].ncob_y, model.ncob_y,
1081
424
        cfg->model_value, setup_ncob.lossy_par);
1082
424
      up_model_buf[i].fx_variance = cmp_up_model(data_buf[i].fx_variance, model.fx_variance,
1083
424
        cfg->model_value, setup_fx_var.lossy_par);
1084
424
      up_model_buf[i].cob_x_variance = cmp_up_model(data_buf[i].cob_x_variance, model.cob_x_variance,
1085
424
        cfg->model_value, setup_cob_var.lossy_par);
1086
424
      up_model_buf[i].cob_y_variance = cmp_up_model(data_buf[i].cob_y_variance, model.cob_y_variance,
1087
424
        cfg->model_value, setup_cob_var.lossy_par);
1088
424
    }
1089
1090
5.38k
    if (i >= cfg->samples-1)
1091
736
      break;
1092
1093
4.64k
    model = next_model_p[i];
1094
4.64k
  }
1095
1.00k
  return stream_len;
1096
1.00k
}
1097
1098
1099
/**
1100
 * @brief compress L_FX_EFX_NCOB_ECOB data
1101
 *
1102
 * @param cfg   pointer to the compression configuration structure
1103
 * @param stream_len  already used length of the bitstream in bits
1104
 *
1105
 * @returns the bit length of the bitstream on success or an error code if it
1106
 *  fails (which can be tested with cmp_is_error())
1107
 */
1108
1109
static uint32_t compress_l_fx_efx_ncob_ecob(const struct cmp_cfg *cfg, uint32_t stream_len)
1110
3.06k
{
1111
3.06k
  size_t i;
1112
1113
3.06k
  const struct l_fx_efx_ncob_ecob *data_buf = cfg->src;
1114
3.06k
  const struct l_fx_efx_ncob_ecob *model_buf = cfg->model_buf;
1115
3.06k
  struct l_fx_efx_ncob_ecob *up_model_buf = NULL;
1116
3.06k
  const struct l_fx_efx_ncob_ecob *next_model_p;
1117
3.06k
  struct l_fx_efx_ncob_ecob model;
1118
3.06k
  struct encoder_setup setup_exp_flag, setup_fx, setup_ncob, setup_efx,
1119
3.06k
            setup_ecob, setup_fx_var, setup_cob_var;
1120
1121
3.06k
  if (model_mode_is_used(cfg->cmp_mode)) {
1122
963
    model = model_buf[0];
1123
963
    next_model_p = &model_buf[1];
1124
963
    up_model_buf = cfg->updated_model_buf;
1125
2.10k
  } else {
1126
2.10k
    memset(&model, 0, sizeof(model));
1127
2.10k
    next_model_p = data_buf;
1128
2.10k
  }
1129
1130
3.06k
  configure_encoder_setup(&setup_exp_flag, cfg->cmp_par_exp_flags, cfg->spill_exp_flags,
1131
3.06k
        cfg->round, MAX_USED_BITS.l_exp_flags, cfg);
1132
3.06k
  configure_encoder_setup(&setup_fx, cfg->cmp_par_fx, cfg->spill_fx,
1133
3.06k
        cfg->round, MAX_USED_BITS.l_fx, cfg);
1134
3.06k
  configure_encoder_setup(&setup_ncob, cfg->cmp_par_ncob, cfg->spill_ncob,
1135
3.06k
        cfg->round, MAX_USED_BITS.l_ncob, cfg);
1136
3.06k
  configure_encoder_setup(&setup_efx, cfg->cmp_par_efx, cfg->spill_efx,
1137
3.06k
        cfg->round, MAX_USED_BITS.l_efx, cfg);
1138
3.06k
  configure_encoder_setup(&setup_ecob, cfg->cmp_par_ecob, cfg->spill_ecob,
1139
3.06k
        cfg->round, MAX_USED_BITS.l_ecob, cfg);
1140
  /* we use the same compression parameters for both variance data fields */
1141
3.06k
  configure_encoder_setup(&setup_fx_var, cfg->cmp_par_fx_cob_variance, cfg->spill_fx_cob_variance,
1142
3.06k
        cfg->round, MAX_USED_BITS.l_fx_cob_variance, cfg);
1143
3.06k
  configure_encoder_setup(&setup_cob_var, cfg->cmp_par_fx_cob_variance, cfg->spill_fx_cob_variance,
1144
3.06k
        cfg->round, MAX_USED_BITS.l_fx_cob_variance, cfg);
1145
1146
4.77k
  for (i = 0;; i++) {
1147
4.77k
    stream_len = encode_value(data_buf[i].exp_flags, model.exp_flags,
1148
4.77k
            stream_len, &setup_exp_flag);
1149
4.77k
    if (cmp_is_error(stream_len))
1150
20
      break;
1151
4.75k
    stream_len = encode_value(data_buf[i].fx, model.fx, stream_len,
1152
4.75k
            &setup_fx);
1153
4.75k
    if (cmp_is_error(stream_len))
1154
21
      break;
1155
4.73k
    stream_len = encode_value(data_buf[i].ncob_x, model.ncob_x,
1156
4.73k
            stream_len, &setup_ncob);
1157
4.73k
    if (cmp_is_error(stream_len))
1158
20
      break;
1159
4.71k
    stream_len = encode_value(data_buf[i].ncob_y, model.ncob_y,
1160
4.71k
            stream_len, &setup_ncob);
1161
4.71k
    if (cmp_is_error(stream_len))
1162
47
      break;
1163
4.66k
    stream_len = encode_value(data_buf[i].efx, model.efx,
1164
4.66k
            stream_len, &setup_efx);
1165
4.66k
    if (cmp_is_error(stream_len))
1166
55
      break;
1167
4.61k
    stream_len = encode_value(data_buf[i].ecob_x, model.ecob_x,
1168
4.61k
            stream_len, &setup_ecob);
1169
4.61k
    if (cmp_is_error(stream_len))
1170
82
      break;
1171
4.53k
    stream_len = encode_value(data_buf[i].ecob_y, model.ecob_y,
1172
4.53k
            stream_len, &setup_ecob);
1173
4.53k
    if (cmp_is_error(stream_len))
1174
91
      break;
1175
4.44k
    stream_len = encode_value(data_buf[i].fx_variance, model.fx_variance,
1176
4.44k
            stream_len, &setup_fx_var);
1177
4.44k
    if (cmp_is_error(stream_len))
1178
155
      break;
1179
4.28k
    stream_len = encode_value(data_buf[i].cob_x_variance, model.cob_x_variance,
1180
4.28k
            stream_len, &setup_cob_var);
1181
4.28k
    if (cmp_is_error(stream_len))
1182
191
      break;
1183
4.09k
    stream_len = encode_value(data_buf[i].cob_y_variance, model.cob_y_variance,
1184
4.09k
            stream_len, &setup_cob_var);
1185
4.09k
    if (cmp_is_error(stream_len))
1186
184
      break;
1187
1188
3.91k
    if (up_model_buf) {
1189
1.41k
      up_model_buf[i].exp_flags = cmp_up_model32(data_buf[i].exp_flags, model.exp_flags,
1190
1.41k
        cfg->model_value, setup_exp_flag.lossy_par);
1191
1.41k
      up_model_buf[i].fx = cmp_up_model(data_buf[i].fx, model.fx,
1192
1.41k
        cfg->model_value, setup_fx.lossy_par);
1193
1.41k
      up_model_buf[i].ncob_x = cmp_up_model(data_buf[i].ncob_x, model.ncob_x,
1194
1.41k
        cfg->model_value, setup_ncob.lossy_par);
1195
1.41k
      up_model_buf[i].ncob_y = cmp_up_model(data_buf[i].ncob_y, model.ncob_y,
1196
1.41k
        cfg->model_value, setup_ncob.lossy_par);
1197
1.41k
      up_model_buf[i].efx = cmp_up_model(data_buf[i].efx, model.efx,
1198
1.41k
        cfg->model_value, setup_efx.lossy_par);
1199
1.41k
      up_model_buf[i].ecob_x = cmp_up_model(data_buf[i].ecob_x, model.ecob_x,
1200
1.41k
        cfg->model_value, setup_ecob.lossy_par);
1201
1.41k
      up_model_buf[i].ecob_y = cmp_up_model(data_buf[i].ecob_y, model.ecob_y,
1202
1.41k
        cfg->model_value, setup_ecob.lossy_par);
1203
1.41k
      up_model_buf[i].fx_variance = cmp_up_model(data_buf[i].fx_variance, model.fx_variance,
1204
1.41k
        cfg->model_value, setup_fx_var.lossy_par);
1205
1.41k
      up_model_buf[i].cob_x_variance = cmp_up_model(data_buf[i].cob_x_variance, model.cob_x_variance,
1206
1.41k
        cfg->model_value, setup_cob_var.lossy_par);
1207
1.41k
      up_model_buf[i].cob_y_variance = cmp_up_model(data_buf[i].cob_y_variance, model.cob_y_variance,
1208
1.41k
        cfg->model_value, setup_cob_var.lossy_par);
1209
1.41k
    }
1210
1211
3.91k
    if (i >= cfg->samples-1)
1212
2.20k
      break;
1213
1214
1.71k
    model = next_model_p[i];
1215
1.71k
  }
1216
3.06k
  return stream_len;
1217
3.06k
}
1218
1219
1220
/**
1221
 * @brief compress offset data from the normal and fast cameras
1222
 *
1223
 * @param cfg   pointer to the compression configuration structure
1224
 * @param stream_len  already used length of the bitstream in bits
1225
 *
1226
 * @returns the bit length of the bitstream on success or an error code if it
1227
 *  fails (which can be tested with cmp_is_error())
1228
 */
1229
1230
static uint32_t compress_offset(const struct cmp_cfg *cfg, uint32_t stream_len)
1231
1.96k
{
1232
1.96k
  size_t i;
1233
1234
1.96k
  const struct offset *data_buf = cfg->src;
1235
1.96k
  const struct offset *model_buf = cfg->model_buf;
1236
1.96k
  struct offset *up_model_buf = NULL;
1237
1.96k
  const struct offset *next_model_p;
1238
1.96k
  struct offset model;
1239
1.96k
  struct encoder_setup setup_mean, setup_var;
1240
1241
1.96k
  if (model_mode_is_used(cfg->cmp_mode)) {
1242
733
    model = model_buf[0];
1243
733
    next_model_p = &model_buf[1];
1244
733
    up_model_buf = cfg->updated_model_buf;
1245
1.23k
  } else {
1246
1.23k
    memset(&model, 0, sizeof(model));
1247
1.23k
    next_model_p = data_buf;
1248
1.23k
  }
1249
1250
1.96k
  {
1251
1.96k
    unsigned int mean_bits_used, variance_bits_used;
1252
1253
1.96k
    if (cfg->data_type == DATA_TYPE_F_CAM_OFFSET) {
1254
813
      mean_bits_used = MAX_USED_BITS.fc_offset_mean;
1255
813
      variance_bits_used = MAX_USED_BITS.fc_offset_variance;
1256
1.15k
    } else { /* DATA_TYPE_OFFSET */
1257
1.15k
      mean_bits_used = MAX_USED_BITS.nc_offset_mean;
1258
1.15k
      variance_bits_used = MAX_USED_BITS.nc_offset_variance;
1259
1.15k
    }
1260
1261
1.96k
    configure_encoder_setup(&setup_mean, cfg->cmp_par_offset_mean, cfg->spill_offset_mean,
1262
1.96k
          cfg->round, mean_bits_used, cfg);
1263
1.96k
    configure_encoder_setup(&setup_var, cfg->cmp_par_offset_variance, cfg->spill_offset_variance,
1264
1.96k
          cfg->round, variance_bits_used, cfg);
1265
1.96k
  }
1266
1267
8.13k
  for (i = 0;; i++) {
1268
8.13k
    stream_len = encode_value(data_buf[i].mean, model.mean,
1269
8.13k
            stream_len, &setup_mean);
1270
8.13k
    if (cmp_is_error(stream_len))
1271
164
      return stream_len;
1272
7.97k
    stream_len = encode_value(data_buf[i].variance, model.variance,
1273
7.97k
            stream_len, &setup_var);
1274
7.97k
    if (cmp_is_error(stream_len))
1275
173
      return stream_len;
1276
1277
7.80k
    if (up_model_buf) {
1278
2.71k
      up_model_buf[i].mean = cmp_up_model(data_buf[i].mean, model.mean,
1279
2.71k
        cfg->model_value, setup_mean.lossy_par);
1280
2.71k
      up_model_buf[i].variance = cmp_up_model(data_buf[i].variance, model.variance,
1281
2.71k
        cfg->model_value, setup_var.lossy_par);
1282
2.71k
    }
1283
1284
7.80k
    if (i >= cfg->samples-1)
1285
1.63k
      break;
1286
1287
6.17k
    model = next_model_p[i];
1288
6.17k
  }
1289
1.63k
  return stream_len;
1290
1.96k
}
1291
1292
1293
/**
1294
 * @brief compress background data from the normal and fast cameras
1295
 *
1296
 * @param cfg   pointer to the compression configuration structure
1297
 * @param stream_len  already used length of the bitstream in bits
1298
 *
1299
 * @returns the bit length of the bitstream on success or an error code if it
1300
 *  fails (which can be tested with cmp_is_error())
1301
 */
1302
1303
static uint32_t compress_background(const struct cmp_cfg *cfg, uint32_t stream_len)
1304
5.22k
{
1305
5.22k
  size_t i;
1306
1307
5.22k
  const struct background *data_buf = cfg->src;
1308
5.22k
  const struct background *model_buf = cfg->model_buf;
1309
5.22k
  struct background *up_model_buf = NULL;
1310
5.22k
  const struct background *next_model_p;
1311
5.22k
  struct background model;
1312
5.22k
  struct encoder_setup setup_mean, setup_var, setup_pix;
1313
1314
5.22k
  if (model_mode_is_used(cfg->cmp_mode)) {
1315
2.71k
    model = model_buf[0];
1316
2.71k
    next_model_p = &model_buf[1];
1317
2.71k
    up_model_buf = cfg->updated_model_buf;
1318
2.71k
  } else {
1319
2.50k
    memset(&model, 0, sizeof(model));
1320
2.50k
    next_model_p = data_buf;
1321
2.50k
  }
1322
1323
5.22k
  {
1324
5.22k
    unsigned int mean_used_bits, variance_used_bits, pixels_error_used_bits;
1325
1326
5.22k
    if (cfg->data_type == DATA_TYPE_F_CAM_BACKGROUND) {
1327
4.84k
      mean_used_bits = MAX_USED_BITS.fc_background_mean;
1328
4.84k
      variance_used_bits = MAX_USED_BITS.fc_background_variance;
1329
4.84k
      pixels_error_used_bits = MAX_USED_BITS.fc_background_outlier_pixels;
1330
4.84k
    } else { /* DATA_TYPE_BACKGROUND */
1331
375
      mean_used_bits = MAX_USED_BITS.nc_background_mean;
1332
375
      variance_used_bits = MAX_USED_BITS.nc_background_variance;
1333
375
      pixels_error_used_bits = MAX_USED_BITS.nc_background_outlier_pixels;
1334
375
    }
1335
5.22k
    configure_encoder_setup(&setup_mean, cfg->cmp_par_background_mean, cfg->spill_background_mean,
1336
5.22k
          cfg->round, mean_used_bits, cfg);
1337
5.22k
    configure_encoder_setup(&setup_var, cfg->cmp_par_background_variance, cfg->spill_background_variance,
1338
5.22k
          cfg->round, variance_used_bits, cfg);
1339
5.22k
    configure_encoder_setup(&setup_pix, cfg->cmp_par_background_pixels_error, cfg->spill_background_pixels_error,
1340
5.22k
          cfg->round, pixels_error_used_bits, cfg);
1341
5.22k
  }
1342
1343
10.8k
  for (i = 0;; i++) {
1344
10.8k
    stream_len = encode_value(data_buf[i].mean, model.mean,
1345
10.8k
            stream_len, &setup_mean);
1346
10.8k
    if (cmp_is_error(stream_len))
1347
265
      return stream_len;
1348
10.5k
    stream_len = encode_value(data_buf[i].variance, model.variance,
1349
10.5k
            stream_len, &setup_var);
1350
10.5k
    if (cmp_is_error(stream_len))
1351
1.02k
      return stream_len;
1352
9.57k
    stream_len = encode_value(data_buf[i].outlier_pixels, model.outlier_pixels,
1353
9.57k
            stream_len, &setup_pix);
1354
9.57k
    if (cmp_is_error(stream_len))
1355
293
      return stream_len;
1356
1357
9.27k
    if (up_model_buf) {
1358
5.43k
      up_model_buf[i].mean = cmp_up_model(data_buf[i].mean, model.mean,
1359
5.43k
        cfg->model_value, setup_mean.lossy_par);
1360
5.43k
      up_model_buf[i].variance = cmp_up_model(data_buf[i].variance, model.variance,
1361
5.43k
        cfg->model_value, setup_var.lossy_par);
1362
5.43k
      up_model_buf[i].outlier_pixels = cmp_up_model(data_buf[i].outlier_pixels, model.outlier_pixels,
1363
5.43k
        cfg->model_value, setup_pix.lossy_par);
1364
5.43k
    }
1365
1366
9.27k
    if (i >= cfg->samples-1)
1367
3.64k
      break;
1368
1369
5.63k
    model = next_model_p[i];
1370
5.63k
  }
1371
3.64k
  return stream_len;
1372
5.22k
}
1373
1374
1375
/**
1376
 * @brief compress smearing data from the normal cameras
1377
 *
1378
 * @param cfg   pointer to the compression configuration structure
1379
 * @param stream_len  already used length of the bitstream in bits
1380
 *
1381
 * @returns the bit length of the bitstream on success or an error code if it
1382
 *  fails (which can be tested with cmp_is_error())
1383
 */
1384
1385
static uint32_t compress_smearing(const struct cmp_cfg *cfg, uint32_t stream_len)
1386
4.15k
{
1387
4.15k
  size_t i;
1388
1389
4.15k
  const struct smearing *data_buf = cfg->src;
1390
4.15k
  const struct smearing *model_buf = cfg->model_buf;
1391
4.15k
  struct smearing *up_model_buf = NULL;
1392
4.15k
  const struct smearing *next_model_p;
1393
4.15k
  struct smearing model;
1394
4.15k
  struct encoder_setup setup_mean, setup_var_mean, setup_pix;
1395
1396
4.15k
  if (model_mode_is_used(cfg->cmp_mode)) {
1397
3.20k
    model = model_buf[0];
1398
3.20k
    next_model_p = &model_buf[1];
1399
3.20k
    up_model_buf = cfg->updated_model_buf;
1400
3.20k
  } else {
1401
957
    memset(&model, 0, sizeof(model));
1402
957
    next_model_p = data_buf;
1403
957
  }
1404
1405
4.15k
  configure_encoder_setup(&setup_mean, cfg->cmp_par_smearing_mean, cfg->spill_smearing_mean,
1406
4.15k
        cfg->round, MAX_USED_BITS.smearing_mean, cfg);
1407
4.15k
  configure_encoder_setup(&setup_var_mean, cfg->cmp_par_smearing_variance, cfg->spill_smearing_variance,
1408
4.15k
        cfg->round, MAX_USED_BITS.smearing_variance_mean, cfg);
1409
4.15k
  configure_encoder_setup(&setup_pix, cfg->cmp_par_smearing_pixels_error, cfg->spill_smearing_pixels_error,
1410
4.15k
        cfg->round, MAX_USED_BITS.smearing_outlier_pixels, cfg);
1411
1412
11.4k
  for (i = 0;; i++) {
1413
11.4k
    stream_len = encode_value(data_buf[i].mean, model.mean,
1414
11.4k
            stream_len, &setup_mean);
1415
11.4k
    if (cmp_is_error(stream_len))
1416
229
      return stream_len;
1417
11.2k
    stream_len = encode_value(data_buf[i].variance_mean, model.variance_mean,
1418
11.2k
            stream_len, &setup_var_mean);
1419
11.2k
    if (cmp_is_error(stream_len))
1420
163
      return stream_len;
1421
11.0k
    stream_len = encode_value(data_buf[i].outlier_pixels, model.outlier_pixels,
1422
11.0k
            stream_len, &setup_pix);
1423
11.0k
    if (cmp_is_error(stream_len))
1424
149
      return stream_len;
1425
1426
10.9k
    if (up_model_buf) {
1427
7.81k
      up_model_buf[i].mean = cmp_up_model(data_buf[i].mean, model.mean,
1428
7.81k
        cfg->model_value, setup_mean.lossy_par);
1429
7.81k
      up_model_buf[i].variance_mean = cmp_up_model(data_buf[i].variance_mean, model.variance_mean,
1430
7.81k
        cfg->model_value, setup_var_mean.lossy_par);
1431
7.81k
      up_model_buf[i].outlier_pixels = cmp_up_model(data_buf[i].outlier_pixels, model.outlier_pixels,
1432
7.81k
        cfg->model_value, setup_pix.lossy_par);
1433
7.81k
    }
1434
1435
10.9k
    if (i >= cfg->samples-1)
1436
3.61k
      break;
1437
1438
7.29k
    model = next_model_p[i];
1439
7.29k
  }
1440
3.61k
  return stream_len;
1441
4.15k
}
1442
1443
1444
/**
1445
 * @brief check if two buffers are overlapping
1446
 * @see https://stackoverflow.com/a/325964
1447
 *
1448
 * @param buf_a   start address of the 1st buffer (can be NULL)
1449
 * @param size_a  byte size of the 1st buffer
1450
 * @param buf_b   start address of the 2nd buffer (can be NULL)
1451
 * @param size_b  byte size of the 2nd buffer
1452
 *
1453
 * @returns 0 if the buffers do not overlap, otherwise a non-zero value
1454
 *  (the buffers overlap)
1455
 */
1456
1457
static int buffer_overlaps(const void *buf_a, size_t size_a,
1458
         const void *buf_b, size_t size_b)
1459
377k
{
1460
377k
  if (!buf_a)
1461
48.5k
    return 0;
1462
1463
329k
  if (!buf_b)
1464
67.6k
    return 0;
1465
1466
261k
  if ((const char *)buf_a < (const char *)buf_b + size_b &&
1467
261k
      (const char *)buf_b < (const char *)buf_a + size_a)
1468
0
    return 1;
1469
1470
261k
  return 0;
1471
261k
}
1472
1473
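/*
 * Editor's note: a minimal usage sketch of the overlap test above; the
 * buffer names below are made up for illustration:
 */
#if 0
uint16_t data[64];
uint32_t cmp_buf[80];

/* two distinct objects never overlap -> 0 */
assert(buffer_overlaps(cmp_buf, sizeof(cmp_buf), data, sizeof(data)) == 0);
/* aliasing the same memory region -> 1 */
assert(buffer_overlaps(data, sizeof(data), (uint8_t *)data + 2, 4) == 1);
/* a NULL buffer is treated as "no overlap" -> 0 */
assert(buffer_overlaps(NULL, 0, data, sizeof(data)) == 0);
#endif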
1474
/**
1475
 * @brief fill the last part of the bitstream with zeros
1476
 *
1477
 * @param cfg   pointer to the compression configuration structure
1478
 * @param cmp_size  length of the bitstream in bits
1479
 *
1480
 * @returns the bit length of the bitstream on success or an error code if it
1481
 *  fails (which can be tested with cmp_is_error())
1482
 */
1483
1484
static uint32_t pad_bitstream(const struct cmp_cfg *cfg, uint32_t cmp_size)
1485
68.0k
{
1486
68.0k
  unsigned int output_buf_len_bits, n_pad_bits;
1487
1488
68.0k
  if (!cfg->dst)
1489
42.3k
    return cmp_size;
1490
1491
  /* no padding in RAW mode; ALWAYS BIG-ENDIAN */
1492
25.7k
  if (cfg->cmp_mode == CMP_MODE_RAW)
1493
18.3k
    return cmp_size;
1494
1495
  /* maximum length of the bitstream in bits */
1496
7.33k
  output_buf_len_bits = cmp_stream_size_to_bits(cfg->stream_size);
1497
1498
7.33k
  n_pad_bits = 32 - (cmp_size & 0x1FU);
1499
7.33k
  if (n_pad_bits < 32) {
1500
6.61k
    FORWARD_IF_ERROR(put_n_bits32(0, n_pad_bits, cmp_size,
1501
6.61k
         cfg->dst, output_buf_len_bits), "");
1502
6.61k
  }
1503
1504
7.33k
  return cmp_size;
1505
7.33k
}
1506
1507
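/*
 * Editor's note, a worked example of the padding arithmetic above
 * (values chosen for illustration):
 *   cmp_size = 70 bits -> 70 & 0x1F = 6 -> n_pad_bits = 26 -> 70 + 26 = 96 bits
 *   cmp_size = 64 bits -> 64 & 0x1F = 0 -> n_pad_bits = 32 -> padding is
 *   skipped, the stream already ends on a 32-bit word boundary.
 */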
1508
/**
1509
 * @brief internal data compression function
1510
 * This function can compress all types of collection data (one at a time).
1511
 * This function does not take the header of a collection into account.
1512
 *
1513
 * @param cfg   pointer to the compression configuration structure
1514
 * @param stream_len  already used length of the bitstream in bits
1515
 *
1516
 * @note the validity of the cfg structure is not checked
1517
 *
1518
 * @returns the bit length of the bitstream on success or an error code if it
1519
 *  fails (which can be tested with cmp_is_error())
1520
 */
1521
1522
static uint32_t compress_data_internal(const struct cmp_cfg *cfg, uint32_t stream_len)
1523
146k
{
1524
146k
  uint32_t bitsize = 0;
1525
1526
146k
  FORWARD_IF_ERROR(stream_len, "");
1527
146k
  RETURN_ERROR_IF(cfg == NULL, GENERIC, "");
1528
146k
  RETURN_ERROR_IF(stream_len & 0x7, GENERIC, "The stream_len parameter must be a multiple of 8.");
1529
1530
146k
  if (cfg->samples == 0) /* nothing to compress; we are done */
1531
60.5k
    return stream_len;
1532
1533
86.1k
  if (raw_mode_is_used(cfg->cmp_mode)) {
1534
36.0k
    uint32_t raw_size = cfg->samples * (uint32_t)size_of_a_sample(cfg->data_type);
1535
1536
36.0k
    if (cfg->dst) {
1537
18.4k
      uint32_t offset_bytes = stream_len >> 3;
1538
18.4k
      uint8_t *p = (uint8_t *)cfg->dst + offset_bytes;
1539
18.4k
      uint32_t new_stream_size = offset_bytes + raw_size;
1540
1541
18.4k
      RETURN_ERROR_IF(new_stream_size > cfg->stream_size, SMALL_BUFFER, "");
1542
18.3k
      memcpy(p, cfg->src, raw_size);
1543
18.3k
      RETURN_ERROR_IF(cpu_to_be_data_type(p, raw_size, cfg->data_type),
1544
18.3k
          INT_DATA_TYPE_UNSUPPORTED, "");
1545
18.3k
    }
1546
36.0k
    bitsize += stream_len + raw_size * 8; /* convert to bits */
1547
50.1k
  } else {
1548
50.1k
    switch (cfg->data_type) {
1549
1.17k
    case DATA_TYPE_IMAGETTE:
1550
1.17k
    case DATA_TYPE_IMAGETTE_ADAPTIVE:
1551
17.4k
    case DATA_TYPE_SAT_IMAGETTE:
1552
17.4k
    case DATA_TYPE_SAT_IMAGETTE_ADAPTIVE:
1553
18.1k
    case DATA_TYPE_F_CAM_IMAGETTE:
1554
18.1k
    case DATA_TYPE_F_CAM_IMAGETTE_ADAPTIVE:
1555
18.1k
      bitsize = compress_imagette(cfg, stream_len);
1556
18.1k
      break;
1557
1558
1.72k
    case DATA_TYPE_S_FX:
1559
1.72k
      bitsize = compress_s_fx(cfg, stream_len);
1560
1.72k
      break;
1561
2.12k
    case DATA_TYPE_S_FX_EFX:
1562
2.12k
      bitsize = compress_s_fx_efx(cfg, stream_len);
1563
2.12k
      break;
1564
1.37k
    case DATA_TYPE_S_FX_NCOB:
1565
1.37k
      bitsize = compress_s_fx_ncob(cfg, stream_len);
1566
1.37k
      break;
1567
2.29k
    case DATA_TYPE_S_FX_EFX_NCOB_ECOB:
1568
2.29k
      bitsize = compress_s_fx_efx_ncob_ecob(cfg, stream_len);
1569
2.29k
      break;
1570
1571
1572
7.69k
    case DATA_TYPE_L_FX:
1573
7.69k
      bitsize = compress_l_fx(cfg, stream_len);
1574
7.69k
      break;
1575
1.28k
    case DATA_TYPE_L_FX_EFX:
1576
1.28k
      bitsize = compress_l_fx_efx(cfg, stream_len);
1577
1.28k
      break;
1578
1.00k
    case DATA_TYPE_L_FX_NCOB:
1579
1.00k
      bitsize = compress_l_fx_ncob(cfg, stream_len);
1580
1.00k
      break;
1581
3.06k
    case DATA_TYPE_L_FX_EFX_NCOB_ECOB:
1582
3.06k
      bitsize = compress_l_fx_efx_ncob_ecob(cfg, stream_len);
1583
3.06k
      break;
1584
1585
1.15k
    case DATA_TYPE_OFFSET:
1586
1.96k
    case DATA_TYPE_F_CAM_OFFSET:
1587
1.96k
      bitsize = compress_offset(cfg, stream_len);
1588
1.96k
      break;
1589
375
    case DATA_TYPE_BACKGROUND:
1590
5.22k
    case DATA_TYPE_F_CAM_BACKGROUND:
1591
5.22k
      bitsize = compress_background(cfg, stream_len);
1592
5.22k
      break;
1593
4.15k
    case DATA_TYPE_SMEARING:
1594
4.15k
      bitsize = compress_smearing(cfg, stream_len);
1595
4.15k
      break;
1596
1597
0
    case DATA_TYPE_F_FX:
1598
0
    case DATA_TYPE_F_FX_EFX:
1599
0
    case DATA_TYPE_F_FX_NCOB:
1600
0
    case DATA_TYPE_F_FX_EFX_NCOB_ECOB:
1601
0
    case DATA_TYPE_CHUNK:
1602
0
    case DATA_TYPE_UNKNOWN:
1603
0
    default:
1604
0
      RETURN_ERROR(INT_DATA_TYPE_UNSUPPORTED, "");
1605
50.1k
    }
1606
50.1k
  }
1607
1608
86.1k
  if (cmp_is_error(bitsize))
1609
18.0k
    return bitsize;
1610
1611
68.0k
  bitsize = pad_bitstream(cfg, bitsize);
1612
1613
68.0k
  return bitsize;
1614
86.1k
}
1615
1616
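/*
 * Editor's note, a worked size example for the RAW branch above (assuming a
 * 2-byte imagette sample; the numbers are for illustration only): copying
 * 100 samples raw after an already used stream_len of 96 bits yields
 * bitsize = 96 + 100 * 2 * 8 = 1696 bits.
 */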
1617
/**
1618
 * @brief check if the ICU buffer parameters are invalid
1619
 *
1620
 * @param cfg pointer to the compressor configuration to check
1621
 *
1622
 * @returns CMP_ERROR_NO_ERROR (0) if the buffer parameters are valid, otherwise an error code
1623
 */
1624
1625
static uint32_t check_compression_buffers(const struct cmp_cfg *cfg)
1626
92.1k
{
1627
92.1k
  size_t data_size;
1628
1629
92.1k
  RETURN_ERROR_IF(cfg == NULL, GENERIC, "");
1630
1631
92.1k
  RETURN_ERROR_IF(cfg->src == NULL, CHUNK_NULL, "");
1632
1633
92.1k
  data_size = size_of_a_sample(cfg->data_type) * cfg->samples;
1634
1635
92.1k
  if (cfg->samples == 0)
1636
40.8k
    debug_print("Warning: The samples parameter is 0. No data are compressed. This behavior may not be intended.");
1637
1638
92.1k
  RETURN_ERROR_IF(buffer_overlaps(cfg->dst, cfg->stream_size,
1639
92.1k
          cfg->src, data_size), PAR_BUFFERS,
1640
92.1k
    "The compressed data buffer and the data to compress buffer are overlapping.");
1641
1642
92.1k
  if (model_mode_is_used(cfg->cmp_mode)) {
1643
71.4k
    RETURN_ERROR_IF(cfg->model_buf == NULL, PAR_NO_MODEL, "");
1644
1645
71.4k
    RETURN_ERROR_IF(buffer_overlaps(cfg->model_buf, data_size,
1646
71.4k
            cfg->src, data_size), PAR_BUFFERS,
1647
71.4k
        "The model buffer and the data to compress buffer are overlapping.");
1648
71.4k
    RETURN_ERROR_IF(buffer_overlaps(cfg->model_buf, data_size,
1649
71.4k
            cfg->dst, cfg->stream_size), PAR_BUFFERS,
1650
71.4k
        "The model buffer and the compressed data buffer are overlapping.");
1651
1652
71.4k
    RETURN_ERROR_IF(buffer_overlaps(cfg->updated_model_buf, data_size,
1653
71.4k
            cfg->src, data_size), PAR_BUFFERS,
1654
71.4k
        "The updated model buffer and the data to compress buffer are overlapping.");
1655
71.4k
    RETURN_ERROR_IF(buffer_overlaps(cfg->updated_model_buf, data_size,
1656
71.4k
            cfg->dst, cfg->stream_size), PAR_BUFFERS,
1657
71.4k
        "The updated model buffer and the compressed data buffer are overlapping.");
1658
71.4k
  }
1659
1660
92.1k
  return CMP_ERROR(NO_ERROR);
1661
92.1k
}
1662
1663
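/*
 * Editor's note: model_buf and updated_model_buf are deliberately not checked
 * against each other above, because an in-place model update (both pointers
 * referring to the same buffer) is allowed, as documented for compress_chunk()
 * below. A valid setup could therefore look like this (buffer names are
 * placeholders for illustration):
 */
#if 0
cfg.src               = data;    /* data to compress                       */
cfg.model_buf         = model;   /* model of the data                      */
cfg.updated_model_buf = model;   /* same buffer: in-place update, no error */
cfg.dst               = cmp_buf; /* must not overlap data or model         */
#endif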
1664
/**
1665
 * @brief checks if the ICU compression configuration is valid
1666
 *
1667
 * @param cfg pointer to the cmp_cfg structure to be validated
1668
 *
1669
 * @returns an error code if any of the configuration parameters are invalid,
1670
 *  otherwise returns CMP_ERROR_NO_ERROR on valid configuration
1671
 */
1672
1673
static uint32_t cmp_cfg_icu_is_invalid_error_code(const struct cmp_cfg *cfg)
1674
92.2k
{
1675
1676
92.2k
  RETURN_ERROR_IF(cmp_cfg_gen_par_is_invalid(cfg), PAR_GENERIC, "");
1677
1678
92.2k
  if (cmp_imagette_data_type_is_used(cfg->data_type))
1679
18.9k
    RETURN_ERROR_IF(cmp_cfg_imagette_is_invalid(cfg), PAR_SPECIFIC, "");
1680
73.2k
  else if (cmp_fx_cob_data_type_is_used(cfg->data_type))
1681
57.8k
    RETURN_ERROR_IF(cmp_cfg_fx_cob_is_invalid(cfg), PAR_SPECIFIC, "");
1682
15.4k
  else
1683
15.4k
    RETURN_ERROR_IF(cmp_cfg_aux_is_invalid(cfg), PAR_SPECIFIC, "");
1684
1685
92.1k
  FORWARD_IF_ERROR(check_compression_buffers(cfg), "");
1686
1687
92.1k
  return CMP_ERROR(NO_ERROR);
1688
92.1k
}
1689
1690
1691
/**
1692
 * @brief calculate the optimal spill threshold value for the zero escape mechanism
1693
 *
1694
 * @param golomb_par  Golomb parameter
1695
 * @param max_data_bits maximum number of used data bits
1696
 *
1697
 * @returns the highest optimal spill threshold value for a given Golomb
1698
 *  parameter when the zero escape mechanism is used, or 0 if the
1699
 *  Golomb parameter is not valid
1700
 */
1701
1702
static uint32_t cmp_best_zero_spill(uint32_t golomb_par, uint32_t max_data_bits)
1703
11.8k
{
1704
11.8k
  uint32_t const max_spill = cmp_icu_max_spill(golomb_par);
1705
11.8k
  uint32_t cutoff;
1706
11.8k
  uint32_t spill;
1707
1708
11.8k
  if (golomb_par < MIN_NON_IMA_GOLOMB_PAR)
1709
759
    return 0;
1710
11.1k
  if (golomb_par > MAX_NON_IMA_GOLOMB_PAR)
1711
542
    return 0;
1712
1713
10.5k
  cutoff = (0x2U << ilog_2(golomb_par)) - golomb_par;
1714
10.5k
  spill = max_data_bits * golomb_par + cutoff;
1715
10.5k
  if (spill > max_spill)
1716
8.71k
    spill = max_spill;
1717
1718
10.5k
  return spill;
1719
11.1k
}
1720
1721
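/*
 * Editor's note, a worked example of the spill formula above (values for
 * illustration only): for golomb_par = 4 and max_data_bits = 16,
 *   cutoff = (0x2 << ilog_2(4)) - 4 = 8 - 4 = 4
 *   spill  = 16 * 4 + 4 = 68
 * which is then clamped to cmp_icu_max_spill(4) if it exceeds that limit.
 */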
1722
/**
1723
 * @brief estimate a "good" spillover threshold parameter
1724
 *
1725
 * @param golomb_par  Golomb parameter
1726
 * @param cmp_mode  compression mode
1727
 * @param max_data_bits maximum number of used data bits
1728
 *
1729
 * @returns a spillover threshold parameter or 0 if the Golomb parameter is not
1730
 *  valid
1731
 */
1732
1733
static uint32_t cmp_get_spill(uint32_t golomb_par, enum cmp_mode cmp_mode,
1734
            uint32_t max_data_bits)
1735
19.0k
{
1736
19.0k
  if (zero_escape_mech_is_used(cmp_mode))
1737
11.8k
    return cmp_best_zero_spill(golomb_par, max_data_bits);
1738
1739
7.19k
  return cmp_icu_max_spill(golomb_par);
1740
19.0k
}
1741
1742
1743
/**
1744
 * @brief set the compressed collection size field
1745
 *
1746
 * @param cmp_col_size_field  pointer to the compressed collection size field
1747
 * @param cmp_col_size    size of the compressed collection (not including
1748
 *        the compressed collection header size and the
1749
 *        size of the compressed collection size field
1750
 *        itself)
1751
 *
1752
 * @returns 0 on success or an error code otherwise
1753
 */
1754
1755
static uint32_t set_cmp_col_size(uint8_t *cmp_col_size_field, uint32_t cmp_col_size)
1756
45.6k
{
1757
45.6k
  uint16_t const v = cpu_to_be16((uint16_t)cmp_col_size);
1758
1759
45.6k
  RETURN_ERROR_IF(cmp_col_size > UINT16_MAX, INT_CMP_COL_TOO_LARGE,
1760
45.6k
      "%"PRIu32" is bigger than the maximum allowed compression collection size", cmp_col_size_field);
1761
1762
45.6k
  memcpy(cmp_col_size_field, &v, CMP_COLLECTION_FILD_SIZE);
1763
1764
45.6k
  return 0;
1765
45.6k
}
1766
1767
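/*
 * Editor's note: the cast to uint16_t and cpu_to_be16() above imply that the
 * size field is a 2-byte big-endian value (CMP_COLLECTION_FILD_SIZE). For
 * example, cmp_col_size = 0x0123 is stored as the byte sequence 0x01 0x23,
 * independent of the host byte order.
 */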
1768
/**
1769
 * @brief compresses a collection (with a collection header followed by data)
1770
 *
1771
 * @param col   pointer to a collection header
1772
 * @param model   pointer to the model to be used for compression, or NULL
1773
 *      if not applicable
1774
 * @param updated_model pointer to the buffer where the updated model will be
1775
 *      stored, or NULL if not applicable
1776
 * @param dst   pointer to the buffer where the compressed data will be
1777
 *      stored, or NULL to only get the compressed data size
1778
 * @param dst_capacity  the size of the dst buffer in bytes
1779
 * @param cfg   pointer to a compression configuration
1780
 * @param dst_size  the already used size of the dst buffer in bytes
1781
 *
1782
 * @returns the size of the compressed data in bytes (new dst_size) on
1783
 *  success or an error code if it fails (which can be tested with
1784
 *  cmp_is_error())
1785
 */
1786
static uint32_t cmp_collection(const uint8_t *col,
1787
             const uint8_t *model, uint8_t *updated_model,
1788
             uint32_t *dst, uint32_t dst_capacity,
1789
             struct cmp_cfg *cfg, uint32_t dst_size)
1790
92.2k
{
1791
92.2k
  uint32_t const dst_size_begin = dst_size;
1792
92.2k
  uint32_t dst_size_bits;
1793
92.2k
  const struct collection_hdr *col_hdr = (const struct collection_hdr *)col;
1794
92.2k
  uint16_t const col_data_length = cmp_col_get_data_length(col_hdr);
1795
92.2k
  uint16_t sample_size;
1796
1797
  /* sanity check of the collection header */
1798
92.2k
  cfg->data_type = convert_subservice_to_cmp_data_type(cmp_col_get_subservice(col_hdr));
1799
92.2k
  sample_size = (uint16_t)size_of_a_sample(cfg->data_type);
1800
92.2k
  RETURN_ERROR_IF(col_data_length % sample_size, COL_SIZE_INCONSISTENT,
1801
92.2k
      "col_data_length: %u %% sample_size: %u != 0", col_data_length, sample_size);
1802
92.2k
  cfg->samples = col_data_length/sample_size;
1803
1804
  /* prepare the different buffers */
1805
92.2k
  cfg->src = col + COLLECTION_HDR_SIZE;
1806
92.2k
  if (model)
1807
71.9k
    cfg->model_buf = model + COLLECTION_HDR_SIZE;
1808
92.2k
  if (updated_model)
1809
85.2k
    cfg->updated_model_buf = updated_model + COLLECTION_HDR_SIZE;
1810
92.2k
  cfg->dst = dst;
1811
92.2k
  cfg->stream_size = dst_capacity;
1812
92.2k
  FORWARD_IF_ERROR(cmp_cfg_icu_is_invalid_error_code(cfg), "");
1813
1814
92.1k
  if (cfg->cmp_mode != CMP_MODE_RAW) {
1815
    /* here we reserve space for the compressed data size field */
1816
90.3k
    dst_size += CMP_COLLECTION_FILD_SIZE;
1817
90.3k
  }
1818
1819
  /* we do not compress the collection header; we simply copy the header
1820
   * into the compressed data
1821
   */
1822
92.1k
  if (dst) {
1823
46.5k
    RETURN_ERROR_IF(dst_size + COLLECTION_HDR_SIZE > dst_capacity,
1824
46.5k
        SMALL_BUFFER, "");
1825
46.4k
    memcpy((uint8_t *)dst + dst_size, col, COLLECTION_HDR_SIZE);
1826
46.4k
  }
1827
92.0k
  dst_size += COLLECTION_HDR_SIZE;
1828
92.0k
  if (model_mode_is_used(cfg->cmp_mode) && updated_model)
1829
69.8k
    memcpy(updated_model, col, COLLECTION_HDR_SIZE);
1830
1831
  /* check whether there is enough capacity in the dst buffer to store the data uncompressed */
1832
92.0k
  if ((dst == NULL || dst_capacity >= dst_size + col_data_length) &&
1833
92.0k
      cfg->cmp_mode != CMP_MODE_RAW) {
1834
    /* we set the compressed buffer size to the data size -1 to provoke
1835
     * a CMP_ERROR_SMALL_BUFFER error if the data are not compressible
1836
     */
1837
90.0k
    cfg->stream_size = dst_size + col_data_length - 1;
1838
90.0k
    dst_size_bits = compress_data_internal(cfg, dst_size << 3);
1839
1840
90.0k
    if (cmp_get_error_code(dst_size_bits) == CMP_ERROR_SMALL_BUFFER ||
1841
90.0k
        (!dst && dst_size_bits > cmp_stream_size_to_bits(cfg->stream_size))) { /* if dst == NULL compress_data_internal will not return a CMP_ERROR_SMALL_BUFFER */
1842
      /* cannot compress the data with the given parameters;
1843
       * put them uncompressed (raw) into the dst buffer */
1844
54.6k
      enum cmp_mode cmp_mode_cpy = cfg->cmp_mode;
1845
1846
54.6k
      cfg->stream_size = dst_size + col_data_length;
1847
54.6k
      cfg->cmp_mode = CMP_MODE_RAW;
1848
54.6k
      dst_size_bits = compress_data_internal(cfg, dst_size << 3);
1849
54.6k
      cfg->cmp_mode = cmp_mode_cpy;
1850
      /* updated model is in this case a copy of the data to compress */
1851
54.6k
      if (model_mode_is_used(cfg->cmp_mode) && cfg->updated_model_buf)
1852
42.5k
        memcpy(cfg->updated_model_buf, cfg->src, col_data_length);
1853
54.6k
    }
1854
90.0k
  } else {
1855
1.98k
    cfg->stream_size = dst_capacity;
1856
1.98k
    dst_size_bits = compress_data_internal(cfg, dst_size << 3);
1857
1.98k
  }
1858
92.0k
  FORWARD_IF_ERROR(dst_size_bits, "compression failed");
1859
1860
91.9k
  dst_size = cmp_bit_to_byte(dst_size_bits);
1861
91.9k
  if (cfg->cmp_mode != CMP_MODE_RAW && dst) {
1862
45.6k
    uint8_t *cmp_col_size_field = (uint8_t *)dst+dst_size_begin;
1863
45.6k
    uint32_t cmp_col_size = dst_size - dst_size_begin -
1864
45.6k
      COLLECTION_HDR_SIZE - CMP_COLLECTION_FILD_SIZE;
1865
1866
45.6k
    FORWARD_IF_ERROR(set_cmp_col_size(cmp_col_size_field, cmp_col_size), "");
1867
45.6k
  }
1868
1869
91.9k
  return dst_size;
1870
91.9k
}
1871
1872
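/*
 * Editor's note: per collection, the dst buffer produced above is laid out as
 *
 *   | size field (CMP_COLLECTION_FILD_SIZE) | collection header copy | compressed data |
 *
 * where the size field holds the length of the compressed data only (header
 * and field excluded). In RAW mode the size field is omitted and the
 * collection data follow the copied header uncompressed.
 */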
1873
/**
1874
 * @brief builds a compressed entity header for a compressed chunk
1875
 *
1876
 * @param entity    start address of the compression entity header
1877
 *        (can be NULL if you only want the entity header
1878
 *        size)
1879
 * @param chunk_size    the original size of the chunk in bytes
1880
 * @param cfg     pointer to the compression configuration used to
1881
 *        compress the chunk
1882
 * @param start_timestamp the start timestamp of the chunk compression
1883
 * @param cmp_ent_size_byte the size of the compression entity (entity
1884
 *        header plus compressed data)
1885
 *
1886
 * @return the size of the compressed entity header in bytes or an error code
1887
 *  if it fails (which can be tested with cmp_is_error())
1888
 */
1889
1890
static uint32_t cmp_ent_build_chunk_header(uint32_t *entity, uint32_t chunk_size,
1891
             const struct cmp_cfg *cfg, uint64_t start_timestamp,
1892
             uint32_t cmp_ent_size_byte)
1893
7.76k
{
1894
7.76k
  if (entity) { /* setup the compressed entity header */
1895
1.54k
    struct cmp_entity *ent = (struct cmp_entity *)entity;
1896
1.54k
    int err = 0;
1897
1898
1.54k
    err |= cmp_ent_set_version_id(ent, version_identifier); /* set by compress_chunk_init */
1899
1.54k
    err |= cmp_ent_set_size(ent, cmp_ent_size_byte);
1900
1.54k
    err |= cmp_ent_set_original_size(ent, chunk_size);
1901
1.54k
    err |= cmp_ent_set_data_type(ent, DATA_TYPE_CHUNK, cfg->cmp_mode == CMP_MODE_RAW);
1902
1.54k
    err |= cmp_ent_set_cmp_mode(ent, cfg->cmp_mode);
1903
1.54k
    err |= cmp_ent_set_model_value(ent, cfg->model_value);
1904
    /* model id/counter are set by the user with the compress_chunk_set_model_id_and_counter() */
1905
1.54k
    err |= cmp_ent_set_model_id(ent, 0);
1906
1.54k
    err |= cmp_ent_set_model_counter(ent, 0);
1907
1.54k
    err |= cmp_ent_set_reserved(ent, 0);
1908
1.54k
    err |= cmp_ent_set_lossy_cmp_par(ent, cfg->round);
1909
1.54k
    if (cfg->cmp_mode != CMP_MODE_RAW) {
1910
1.40k
      err |= cmp_ent_set_non_ima_spill1(ent, cfg->spill_par_1);
1911
1.40k
      err |= cmp_ent_set_non_ima_cmp_par1(ent, cfg->cmp_par_1);
1912
1.40k
      err |= cmp_ent_set_non_ima_spill2(ent, cfg->spill_par_2);
1913
1.40k
      err |= cmp_ent_set_non_ima_cmp_par2(ent, cfg->cmp_par_2);
1914
1.40k
      err |= cmp_ent_set_non_ima_spill3(ent, cfg->spill_par_3);
1915
1.40k
      err |= cmp_ent_set_non_ima_cmp_par3(ent, cfg->cmp_par_3);
1916
1.40k
      err |= cmp_ent_set_non_ima_spill4(ent, cfg->spill_par_4);
1917
1.40k
      err |= cmp_ent_set_non_ima_cmp_par4(ent, cfg->cmp_par_4);
1918
1.40k
      err |= cmp_ent_set_non_ima_spill5(ent, cfg->spill_par_5);
1919
1.40k
      err |= cmp_ent_set_non_ima_cmp_par5(ent, cfg->cmp_par_5);
1920
1.40k
      err |= cmp_ent_set_non_ima_spill6(ent, cfg->spill_par_6);
1921
1.40k
      err |= cmp_ent_set_non_ima_cmp_par6(ent, cfg->cmp_par_6);
1922
1.40k
    }
1923
1.54k
    RETURN_ERROR_IF(err, ENTITY_HEADER, "");
1924
1.31k
    RETURN_ERROR_IF(cmp_ent_set_start_timestamp(ent, start_timestamp),
1925
1.31k
        ENTITY_TIMESTAMP, "");
1926
1.31k
    RETURN_ERROR_IF(cmp_ent_set_end_timestamp(ent, get_timestamp()),
1927
1.31k
        ENTITY_TIMESTAMP, "");
1928
1.31k
  }
1929
1930
7.53k
  if (cfg->cmp_mode == CMP_MODE_RAW)
1931
482
    return GENERIC_HEADER_SIZE;
1932
7.05k
  else
1933
7.05k
    return NON_IMAGETTE_HEADER_SIZE;
1934
7.53k
}
1935
1936
1937
/**
1938
 * @brief Set the compression configuration from the compression parameters
1939
 *  based on the chunk type of the collection
1940
 *
1941
 * @param[in] col pointer to a collection header
1942
 * @param[in] par pointer to a compression parameters struct
1943
 * @param[out] cfg  pointer to a compression configuration
1944
 *
1945
 * @returns the chunk type of the collection
1946
 */
1947
1948
static enum chunk_type init_cmp_cfg_from_cmp_par(const struct collection_hdr *col,
1949
             const struct cmp_par *par,
1950
             struct cmp_cfg *cfg)
1951
4.21k
{
1952
4.21k
  enum chunk_type chunk_type = cmp_col_get_chunk_type(col);
1953
1954
4.21k
  memset(cfg, 0, sizeof(struct cmp_cfg));
1955
1956
  /* the ranges of the parameters are checked in cmp_cfg_icu_is_invalid_error_code() */
1957
4.21k
  cfg->cmp_mode = par->cmp_mode;
1958
4.21k
  cfg->model_value = par->model_value;
1959
4.21k
  if (par->lossy_par)
1960
3.15k
    debug_print("Warning: lossy compression is not supported for chunk compression, lossy_par will be ignored.");
1961
4.21k
  cfg->round = 0;
1962
1963
4.21k
  switch (chunk_type) {
1964
227
  case CHUNK_TYPE_NCAM_IMAGETTE:
1965
227
    cfg->cmp_par_imagette = par->nc_imagette;
1966
227
    cfg->spill_imagette = cmp_get_spill(cfg->cmp_par_imagette, cfg->cmp_mode,
1967
227
                MAX_USED_BITS.nc_imagette);
1968
227
    break;
1969
563
  case CHUNK_TYPE_SAT_IMAGETTE:
1970
563
    cfg->cmp_par_imagette = par->saturated_imagette;
1971
563
    cfg->spill_imagette = cmp_get_spill(cfg->cmp_par_imagette, cfg->cmp_mode,
1972
563
                MAX_USED_BITS.saturated_imagette);
1973
563
    break;
1974
1.24k
  case CHUNK_TYPE_SHORT_CADENCE:
1975
1.24k
    cfg->cmp_par_exp_flags = par->s_exp_flags;
1976
1.24k
    cfg->spill_exp_flags = cmp_get_spill(cfg->cmp_par_exp_flags, cfg->cmp_mode,
1977
1.24k
                 MAX_USED_BITS.s_exp_flags);
1978
1.24k
    cfg->cmp_par_fx = par->s_fx;
1979
1.24k
    cfg->spill_fx = cmp_get_spill(cfg->cmp_par_fx, cfg->cmp_mode,
1980
1.24k
                MAX_USED_BITS.s_fx);
1981
1.24k
    cfg->cmp_par_ncob = par->s_ncob;
1982
1.24k
    cfg->spill_ncob = cmp_get_spill(cfg->cmp_par_ncob, cfg->cmp_mode,
1983
1.24k
            MAX_USED_BITS.s_ncob);
1984
1.24k
    cfg->cmp_par_efx = par->s_efx;
1985
1.24k
    cfg->spill_efx = cmp_get_spill(cfg->cmp_par_efx, cfg->cmp_mode,
1986
1.24k
                 MAX_USED_BITS.s_efx);
1987
1.24k
    cfg->cmp_par_ecob = par->s_ecob;
1988
1.24k
    cfg->spill_ecob = cmp_get_spill(cfg->cmp_par_ecob, cfg->cmp_mode,
1989
1.24k
            MAX_USED_BITS.s_ecob);
1990
1.24k
    break;
1991
1.20k
  case CHUNK_TYPE_LONG_CADENCE:
1992
1.20k
    cfg->cmp_par_exp_flags = par->l_exp_flags;
1993
1.20k
    cfg->spill_exp_flags = cmp_get_spill(cfg->cmp_par_exp_flags, cfg->cmp_mode,
1994
1.20k
                 MAX_USED_BITS.l_exp_flags);
1995
1.20k
    cfg->cmp_par_fx = par->l_fx;
1996
1.20k
    cfg->spill_fx = cmp_get_spill(cfg->cmp_par_fx, cfg->cmp_mode,
1997
1.20k
                MAX_USED_BITS.l_fx);
1998
1.20k
    cfg->cmp_par_ncob = par->l_ncob;
1999
1.20k
    cfg->spill_ncob = cmp_get_spill(cfg->cmp_par_ncob, cfg->cmp_mode,
2000
1.20k
            MAX_USED_BITS.l_ncob);
2001
1.20k
    cfg->cmp_par_efx = par->l_efx;
2002
1.20k
    cfg->spill_efx = cmp_get_spill(cfg->cmp_par_efx, cfg->cmp_mode,
2003
1.20k
                 MAX_USED_BITS.l_efx);
2004
1.20k
    cfg->cmp_par_ecob = par->l_ecob;
2005
1.20k
    cfg->spill_ecob = cmp_get_spill(cfg->cmp_par_ecob, cfg->cmp_mode,
2006
1.20k
            MAX_USED_BITS.l_ecob);
2007
1.20k
    cfg->cmp_par_fx_cob_variance = par->l_fx_cob_variance;
2008
1.20k
    cfg->spill_fx_cob_variance = cmp_get_spill(cfg->cmp_par_fx_cob_variance,
2009
1.20k
                 cfg->cmp_mode, MAX_USED_BITS.l_fx_cob_variance);
2010
1.20k
    break;
2011
208
  case CHUNK_TYPE_OFFSET_BACKGROUND:
2012
208
    cfg->cmp_par_offset_mean = par->nc_offset_mean;
2013
208
    cfg->spill_offset_mean = cmp_get_spill(cfg->cmp_par_offset_mean,
2014
208
            cfg->cmp_mode, MAX_USED_BITS.nc_offset_mean);
2015
208
    cfg->cmp_par_offset_variance = par->nc_offset_variance;
2016
208
    cfg->spill_offset_variance = cmp_get_spill(cfg->cmp_par_offset_variance,
2017
208
            cfg->cmp_mode, MAX_USED_BITS.nc_offset_variance);
2018
208
    cfg->cmp_par_background_mean = par->nc_background_mean;
2019
208
    cfg->spill_background_mean = cmp_get_spill(cfg->cmp_par_background_mean,
2020
208
            cfg->cmp_mode, MAX_USED_BITS.nc_background_mean);
2021
208
    cfg->cmp_par_background_variance = par->nc_background_variance;
2022
208
    cfg->spill_background_variance = cmp_get_spill(cfg->cmp_par_background_variance,
2023
208
            cfg->cmp_mode, MAX_USED_BITS.nc_background_variance);
2024
208
    cfg->cmp_par_background_pixels_error = par->nc_background_outlier_pixels;
2025
208
    cfg->spill_background_pixels_error = cmp_get_spill(cfg->cmp_par_background_pixels_error,
2026
208
            cfg->cmp_mode, MAX_USED_BITS.nc_background_outlier_pixels);
2027
208
    break;
2028
2029
276
  case CHUNK_TYPE_SMEARING:
2030
276
    cfg->cmp_par_smearing_mean = par->smearing_mean;
2031
276
    cfg->spill_smearing_mean = cmp_get_spill(cfg->cmp_par_smearing_mean,
2032
276
            cfg->cmp_mode, MAX_USED_BITS.smearing_mean);
2033
276
    cfg->cmp_par_smearing_variance = par->smearing_variance_mean;
2034
276
    cfg->spill_smearing_variance = cmp_get_spill(cfg->cmp_par_smearing_variance,
2035
276
            cfg->cmp_mode, MAX_USED_BITS.smearing_variance_mean);
2036
276
    cfg->cmp_par_smearing_pixels_error = par->smearing_outlier_pixels;
2037
276
    cfg->spill_smearing_pixels_error = cmp_get_spill(cfg->cmp_par_smearing_pixels_error,
2038
276
            cfg->cmp_mode, MAX_USED_BITS.smearing_outlier_pixels);
2039
276
    break;
2040
2041
485
  case CHUNK_TYPE_F_CHAIN:
2042
485
    cfg->cmp_par_imagette = par->fc_imagette;
2043
485
    cfg->spill_imagette = cmp_get_spill(cfg->cmp_par_imagette,
2044
485
            cfg->cmp_mode, MAX_USED_BITS.fc_imagette);
2045
2046
485
    cfg->cmp_par_offset_mean = par->fc_offset_mean;
2047
485
    cfg->spill_offset_mean = cmp_get_spill(cfg->cmp_par_offset_mean,
2048
485
            cfg->cmp_mode, MAX_USED_BITS.fc_offset_mean);
2049
485
    cfg->cmp_par_offset_variance = par->fc_offset_variance;
2050
485
    cfg->spill_offset_variance = cmp_get_spill(cfg->cmp_par_offset_variance,
2051
485
            cfg->cmp_mode, MAX_USED_BITS.fc_offset_variance);
2052
2053
485
    cfg->cmp_par_background_mean = par->fc_background_mean;
2054
485
    cfg->spill_background_mean = cmp_get_spill(cfg->cmp_par_background_mean,
2055
485
            cfg->cmp_mode, MAX_USED_BITS.fc_background_mean);
2056
485
    cfg->cmp_par_background_variance = par->fc_background_variance;
2057
485
    cfg->spill_background_variance = cmp_get_spill(cfg->cmp_par_background_variance,
2058
485
            cfg->cmp_mode, MAX_USED_BITS.fc_background_variance);
2059
485
    cfg->cmp_par_background_pixels_error = par->fc_background_outlier_pixels;
2060
485
    cfg->spill_background_pixels_error = cmp_get_spill(cfg->cmp_par_background_pixels_error,
2061
485
            cfg->cmp_mode, MAX_USED_BITS.fc_background_outlier_pixels);
2062
485
    break;
2063
1
  case CHUNK_TYPE_UNKNOWN:
2064
1
  default: /*
2065
      * default case never reached because cmp_col_get_chunk_type
2066
      * returns CHUNK_TYPE_UNKNOWN if the type is unknown
2067
      */
2068
1
    chunk_type = CHUNK_TYPE_UNKNOWN;
2069
1
    break;
2070
4.21k
  }
2071
2072
4.21k
  return chunk_type;
2073
4.21k
}
2074
2075
2076
/**
2077
 * @brief initialise the compress_chunk() function
2078
 *
2079
 * If not initialised, the compress_chunk() function sets the timestamps and
2080
 * version_id in the compression entity header to zero
2081
 *
2082
 * @param return_timestamp  pointer to a function returning a current 48-bit
2083
 *        timestamp
2084
 * @param version_id    application software version identifier
2085
 */
2086
2087
void compress_chunk_init(uint64_t (*return_timestamp)(void), uint32_t version_id)
2088
0
{
2089
0
  if (return_timestamp)
2090
0
    get_timestamp = return_timestamp;
2091
2092
0
  version_identifier = version_id;
2093
0
}
2094
2095
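/*
 * Editor's note: a minimal initialisation sketch; my_timestamp() and
 * APP_SW_VERSION_ID are placeholders for the caller's own timestamp source
 * and version identifier, not names provided by the library:
 */
#if 0
static uint64_t my_timestamp(void)
{
	return 0; /* return the current 48-bit PLATO timestamp here */
}

compress_chunk_init(my_timestamp, APP_SW_VERSION_ID);
/* passing NULL leaves the timestamp function unchanged (the default returns 0) */
compress_chunk_init(NULL, APP_SW_VERSION_ID);
#endif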
2096
/**
2097
 * @brief compress a data chunk consisting of concatenated data collections
2098
 *
2099
 * @param chunk     pointer to the chunk to be compressed
2100
 * @param chunk_size    byte size of the chunk
2101
 * @param chunk_model   pointer to a model of a chunk; has the same size
2102
 *        as the chunk (can be NULL if no model compression
2103
 *        mode is used)
2104
 * @param updated_chunk_model pointer to store the updated model for the next
2105
 *        model mode compression; has the same size as the
2106
 *        chunk (can be the same as the chunk_model
2107
 *        buffer for an in-place update, or NULL if the updated
2108
 *        model is not needed)
2109
 * @param dst     destination pointer to the compressed data
2110
 *        buffer; has to be 4-byte aligned; can be NULL to
2111
 *        only get the compressed data size
2112
 * @param dst_capacity    capacity of the dst buffer; it's recommended to
2113
 *        provide a dst_capacity >=
2114
 *        compress_chunk_cmp_size_bound(chunk, chunk_size)
2115
 *        as it eliminates one potential failure scenario:
2116
 *        not enough space in the dst buffer to write the
2117
 *        compressed data; size is internally rounded down
2118
 *        to a multiple of 4
2119
 * @param cmp_par   pointer to a compression parameters struct
2120
 * @returns the byte size of the compressed data or an error code if it
2121
 *  fails (which can be tested with cmp_is_error())
2122
 */
2123
2124
uint32_t compress_chunk(const void *chunk, uint32_t chunk_size,
2125
      const void *chunk_model, void *updated_chunk_model,
2126
      uint32_t *dst, uint32_t dst_capacity,
2127
      const struct cmp_par *cmp_par)
2128
4.36k
{
2129
4.36k
  uint64_t const start_timestamp = get_timestamp();
2130
4.36k
  const struct collection_hdr *col = (const struct collection_hdr *)chunk;
2131
4.36k
  enum chunk_type chunk_type;
2132
4.36k
  struct cmp_cfg cfg;
2133
4.36k
  uint32_t cmp_size_byte; /* size of the compressed data in bytes */
2134
4.36k
  size_t read_bytes;
2135
2136
4.36k
  RETURN_ERROR_IF(chunk == NULL, CHUNK_NULL, "");
2137
4.36k
  RETURN_ERROR_IF(cmp_par == NULL, PAR_NULL, "");
2138
4.22k
  RETURN_ERROR_IF(chunk_size < COLLECTION_HDR_SIZE, CHUNK_SIZE_INCONSISTENT,
2139
4.22k
      "chunk_size: %"PRIu32"", chunk_size);
2140
4.21k
  RETURN_ERROR_IF(chunk_size > CMP_ENTITY_MAX_ORIGINAL_SIZE, CHUNK_TOO_LARGE,
2141
4.21k
      "chunk_size: %"PRIu32"", chunk_size);
2142
2143
4.21k
  chunk_type = init_cmp_cfg_from_cmp_par(col, cmp_par, &cfg);
2144
4.21k
  RETURN_ERROR_IF(chunk_type == CHUNK_TYPE_UNKNOWN, COL_SUBSERVICE_UNSUPPORTED,
2145
4.21k
      "unsupported subservice: %u", cmp_col_get_subservice(col));
2146
2147
  /* reserve space for the compression entity header, we will build the
2148
   * header after the compression of the chunk
2149
   */
2150
4.21k
  cmp_size_byte = cmp_ent_build_chunk_header(NULL, chunk_size, &cfg, start_timestamp, 0);
2151
4.21k
  RETURN_ERROR_IF(dst && dst_capacity < cmp_size_byte, SMALL_BUFFER,
2152
4.21k
      "dst_capacity must be at least as large as the minimum size of the compression unit.");
2153
2154
2155
  /* compress one collection after another */
2156
4.21k
  for (read_bytes = 0;
2157
96.1k
       read_bytes <= chunk_size - COLLECTION_HDR_SIZE;
2158
92.4k
       read_bytes += cmp_col_get_size(col)) {
2159
92.4k
    const uint8_t *col_model = NULL;
2160
92.4k
    uint8_t *col_up_model = NULL;
2161
2162
    /* setup pointers for the next collection we want to compress */
2163
92.4k
    col = (const struct collection_hdr *)((const uint8_t *)chunk + read_bytes);
2164
92.4k
    if (chunk_model)
2165
72.1k
      col_model = (const uint8_t *)chunk_model + read_bytes;
2166
92.4k
    if (updated_chunk_model)
2167
85.4k
      col_up_model = (uint8_t *)updated_chunk_model + read_bytes;
2168
2169
92.4k
    RETURN_ERROR_IF(cmp_col_get_chunk_type(col) != chunk_type, CHUNK_SUBSERVICE_INCONSISTENT, "");
2170
2171
    /* chunk size is inconsistent with the sum of sizes in the collection headers */
2172
92.2k
    if (read_bytes + cmp_col_get_size(col) > chunk_size)
2173
18
      break;
2174
2175
92.2k
    cmp_size_byte = cmp_collection((const uint8_t *)col, col_model, col_up_model,
2176
92.2k
                 dst, dst_capacity, &cfg, cmp_size_byte);
2177
92.2k
    FORWARD_IF_ERROR(cmp_size_byte, "error occurred when compressing the collection with offset %u", read_bytes);
2178
92.2k
  }
2179
2180
3.71k
  RETURN_ERROR_IF(read_bytes != chunk_size, CHUNK_SIZE_INCONSISTENT, "");
2181
2182
3.54k
  FORWARD_IF_ERROR(cmp_ent_build_chunk_header(dst, chunk_size, &cfg,
2183
3.54k
              start_timestamp, cmp_size_byte), "");
2184
2185
3.31k
  return cmp_size_byte;
2186
3.54k
}
2187
2188
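/*
 * Editor's note: a typical call sequence, sketched under assumptions --
 * chunk, chunk_size, model and model_id are caller-provided placeholders,
 * the parameter values are for illustration only, and the mode name
 * CMP_MODE_MODEL_ZERO is assumed from cmp_support.h:
 */
#if 0
struct cmp_par par = {0};
uint32_t bound, cmp_size;
uint32_t *dst;

par.cmp_mode    = CMP_MODE_MODEL_ZERO;
par.model_value = 8;
par.nc_imagette = 4;

bound = compress_chunk_cmp_size_bound(chunk, chunk_size);
if (cmp_is_error(bound))
	return bound;
dst = malloc(bound); /* must be 4-byte aligned, which malloc() guarantees */

cmp_size = compress_chunk(chunk, chunk_size, model, model /* in-place update */,
			  dst, bound, &par);
if (!cmp_is_error(cmp_size))
	cmp_size = compress_chunk_set_model_id_and_counter(dst, cmp_size, model_id, 0);
#endif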
2189
/**
2190
 * @brief returns the maximum compressed size in the worst-case scenario,
2191
 *  i.e. when the input data is not compressible.
2192
 * This function is primarily useful for memory allocation purposes
2193
 * (destination buffer size).
2194
 *
2195
 * @note if the number of collections is known, you can use the
2196
 *  COMPRESS_CHUNK_BOUND macro for compile-time evaluation
2197
 *  (e.g. for stack memory allocation)
2198
 *
2199
 * @param chunk   pointer to the chunk you want to compress
2200
 * @param chunk_size  size of the chunk in bytes
2201
 *
2202
 * @returns maximum compressed size for a chunk compression on success or an
2203
 *  error code if it fails (which can be tested with cmp_is_error())
2204
 */
2205
2206
uint32_t compress_chunk_cmp_size_bound(const void *chunk, size_t chunk_size)
2207
2.70k
{
2208
2.70k
  int32_t read_bytes;
2209
2.70k
  uint32_t num_col = 0;
2210
2.70k
  size_t bound;
2211
2.70k
  size_t const max_chunk_size = CMP_ENTITY_MAX_ORIGINAL_SIZE
2212
2.70k
    - NON_IMAGETTE_HEADER_SIZE - CMP_COLLECTION_FILD_SIZE;
2213
2214
2.70k
  RETURN_ERROR_IF(chunk == NULL, CHUNK_NULL, "");
2215
2.70k
  RETURN_ERROR_IF(chunk_size < COLLECTION_HDR_SIZE, CHUNK_SIZE_INCONSISTENT, "");
2216
2.57k
  RETURN_ERROR_IF(chunk_size > max_chunk_size, CHUNK_TOO_LARGE,
2217
2.57k
      "chunk_size: %"PRIu32" > max_chunk_size: %"PRIu32"",
2218
2.57k
      (uint32_t)chunk_size, (uint32_t)max_chunk_size);
2219
2220
  /* count the number of collections in the chunk */
2221
2.57k
  for (read_bytes = 0;
2222
55.3k
       read_bytes <= (int32_t)(chunk_size-COLLECTION_HDR_SIZE);
2223
52.7k
       read_bytes += cmp_col_get_size((const struct collection_hdr *)
2224
52.7k
              ((const uint8_t *)chunk + read_bytes)))
2225
52.7k
    num_col++;
2226
2227
2.57k
  RETURN_ERROR_IF((uint32_t)read_bytes != chunk_size, CHUNK_SIZE_INCONSISTENT, "");
2228
2229
2.16k
  bound = COMPRESS_CHUNK_BOUND_UNSAFE(chunk_size, num_col);
2230
2.16k
  RETURN_ERROR_IF(bound > CMP_ENTITY_MAX_SIZE, CHUNK_TOO_LARGE, "bound: %lu", bound);
2231
2232
2.16k
  return (uint32_t)bound;
2233
2.16k
}
2234
2235
2236
/**
2237
 * @brief set the model id and model counter in the compression entity header
2238
 *
2239
 * @param dst   pointer to the compressed data (starting with a
2240
 *      compression entity header)
2241
 * @param dst_size  byte size of the dst buffer
2242
 * @param model_id  model identifier; for identifying entities that originate
2243
 *      from the same starting model
2244
 * @param model_counter model_counter; counts how many times the model was
2245
 *      updated; for non model mode compression use 0
2246
 *
2247
 * @returns the byte size of the dst buffer (= dst_size) on success or an error
2248
 *  code if it fails (which can be tested with cmp_is_error())
2249
 */
2250
2251
uint32_t compress_chunk_set_model_id_and_counter(void *dst, uint32_t dst_size,
2252
             uint16_t model_id, uint8_t model_counter)
2253
0
{
2254
0
  RETURN_ERROR_IF(dst == NULL, ENTITY_NULL, "");
2255
0
  FORWARD_IF_ERROR(dst_size, "");
2256
0
  RETURN_ERROR_IF(dst_size < GENERIC_HEADER_SIZE, ENTITY_TOO_SMALL,
2257
0
      "dst_size: %"PRIu32"", dst_size);
2258
2259
0
  cmp_ent_set_model_id(dst, model_id);
2260
0
  cmp_ent_set_model_counter(dst, model_counter);
2261
2262
0
  return dst_size;
2263
0
}
2264
2265
2266
/**
2267
 * @brief compress data the same way as the RDCU HW compressor
2268
 *
2269
 * @param rcfg  pointer to a RDCU compression configuration (created with the
2270
 *    rdcu_cfg_create() function, set up with the rdcu_cfg_buffers()
2271
 *    and rdcu_cfg_imagette() functions)
2272
 * @param info  pointer to a compression information structure that contains the
2273
 *    metadata of a compression (can be NULL)
2274
 *
2275
 * @returns the bit length of the bitstream on success or an error code if it
2276
 *  fails (which can be tested with cmp_is_error())
2277
 *
2278
 * @warning only the small buffer error in the info.cmp_err field is implemented
2279
 */
2280
2281
uint32_t compress_like_rdcu(const struct rdcu_cfg *rcfg, struct cmp_info *info)
2282
0
{
2283
0
  struct cmp_cfg cfg;
2284
0
  uint32_t cmp_size_bit;
2285
2286
0
  memset(&cfg, 0, sizeof(cfg));
2287
2288
0
  if (info)
2289
0
    memset(info, 0, sizeof(*info));
2290
2291
0
  if (!rcfg)
2292
0
    return compress_data_internal(NULL, 0);
2293
2294
0
  cfg.data_type = DATA_TYPE_IMAGETTE;
2295
2296
0
  cfg.src = rcfg->input_buf;
2297
0
  cfg.model_buf = rcfg->model_buf;
2298
0
  cfg.samples = rcfg->samples;
2299
0
  cfg.stream_size = (rcfg->buffer_length * sizeof(uint16_t));
2300
0
  cfg.cmp_mode = rcfg->cmp_mode;
2301
0
  cfg.model_value = rcfg->model_value;
2302
0
  cfg.round = rcfg->round;
2303
2304
0
  if (info) {
2305
0
    info->cmp_err = 0;
2306
0
    info->cmp_mode_used = (uint8_t)rcfg->cmp_mode;
2307
0
    info->model_value_used = (uint8_t)rcfg->model_value;
2308
0
    info->round_used = (uint8_t)rcfg->round;
2309
0
    info->spill_used = rcfg->spill;
2310
0
    info->golomb_par_used = rcfg->golomb_par;
2311
0
    info->samples_used = rcfg->samples;
2312
0
    info->rdcu_new_model_adr_used = rcfg->rdcu_new_model_adr;
2313
0
    info->rdcu_cmp_adr_used = rcfg->rdcu_buffer_adr;
2314
0
    info->cmp_size = 0;
2315
0
    info->ap1_cmp_size = 0;
2316
0
    info->ap2_cmp_size = 0;
2317
2318
0
    cfg.cmp_par_imagette = rcfg->ap1_golomb_par;
2319
0
    cfg.spill_imagette = rcfg->ap1_spill;
2320
0
    if (cfg.cmp_par_imagette &&
2321
0
        cmp_cfg_icu_is_invalid_error_code(&cfg) == CMP_ERROR_NO_ERROR)
2322
0
      info->ap1_cmp_size = compress_data_internal(&cfg, 0);
2323
2324
2325
0
    cfg.cmp_par_imagette = rcfg->ap2_golomb_par;
2326
0
    cfg.spill_imagette = rcfg->ap2_spill;
2327
0
    if (cfg.cmp_par_imagette &&
2328
0
        cmp_cfg_icu_is_invalid_error_code(&cfg) == CMP_ERROR_NO_ERROR)
2329
0
      info->ap2_cmp_size = compress_data_internal(&cfg, 0);
2330
0
  }
2331
2332
0
  cfg.cmp_par_imagette = rcfg->golomb_par;
2333
0
  cfg.spill_imagette = rcfg->spill;
2334
0
  cfg.updated_model_buf = rcfg->icu_new_model_buf;
2335
0
  cfg.dst = rcfg->icu_output_buf;
2336
2337
0
  FORWARD_IF_ERROR(cmp_cfg_icu_is_invalid_error_code(&cfg), "");
2338
2339
0
  cmp_size_bit = compress_data_internal(&cfg, 0);
2340
2341
0
  if (info) {
2342
0
    if (cmp_get_error_code(cmp_size_bit) == CMP_ERROR_SMALL_BUFFER)
2343
0
      info->cmp_err |= 1UL << 0;/* SMALL_BUFFER_ERR_BIT;*/ /* set small buffer error */
2344
0
    if (cmp_is_error(cmp_size_bit)) {
2345
0
      info->cmp_size = 0;
2346
0
      info->ap1_cmp_size = 0;
2347
0
      info->ap2_cmp_size = 0;
2348
0
    } else {
2349
0
      info->cmp_size = cmp_size_bit;
2350
0
    }
2351
0
  }
2352
2353
0
  return cmp_size_bit;
2354
0
}