Coverage Report

Created: 2025-06-15 00:57

/src/cmp_tool/lib/icu_compress/cmp_icu.c
Line
Count
Source
1
/**
2
 * @file   cmp_icu.c
3
 * @author Dominik Loidolt (dominik.loidolt@univie.ac.at)
4
 * @date   2020
5
 *
6
 * @copyright GPLv2
7
 * This program is free software; you can redistribute it and/or modify it
8
 * under the terms and conditions of the GNU General Public License,
9
 * version 2, as published by the Free Software Foundation.
10
 *
11
 * This program is distributed in the hope it will be useful, but WITHOUT
12
 * ANY WARRANTY; without even the implied warranty of MERCHANTABILITY or
13
 * FITNESS FOR A PARTICULAR PURPOSE.  See the GNU General Public License for
14
 * more details.
15
 *
16
 * @brief software compression library
17
 * @see Data Compression User Manual PLATO-UVIE-PL-UM-0001
18
 */
19
20
21
#include <stdint.h>
22
#include <string.h>
23
#include <limits.h>
24
25
#include "../common/byteorder.h"
26
#include "../common/compiler.h"
27
#include "../common/cmp_debug.h"
28
#include "../common/cmp_data_types.h"
29
#include "../common/cmp_support.h"
30
#include "../common/cmp_cal_up_model.h"
31
#include "../common/cmp_max_used_bits.h"
32
#include "../common/cmp_entity.h"
33
#include "../common/cmp_error.h"
34
#include "../common/cmp_error_list.h"
35
#include "../common/leon_inttypes.h"
36
#include "cmp_chunk_type.h"
37
38
#include "../cmp_icu.h"
39
#include "../cmp_chunk.h"
40
41
42
/**
43
 * @brief default implementation of the get_timestamp() function
44
 *
45
 * @returns 0
46
 */
47
48
static uint64_t default_get_timestamp(void)
49
2.05k
{
50
2.05k
  return 0;
51
2.05k
}
52
53
54
/**
55
 * @brief function pointer to a function returning the current PLATO timestamp
56
 *  initialised with the compress_chunk_init() function
57
 */
58
59
static uint64_t (*get_timestamp)(void) = default_get_timestamp;
60
61
62
/**
63
 * @brief holds the version_identifier for the compression header
64
 *  initialised with the compress_chunk_init() function
65
 */
66
67
static uint32_t version_identifier;
68
69
70
/**
71
 * @brief structure holding the setup used to encode a value
72
 */
73
74
struct encoder_setup {
75
  uint32_t (*generate_cw_f)(uint32_t value, uint32_t encoder_par1,
76
          uint32_t encoder_par2, uint32_t *cw); /**< function pointer to a code word encoder */
77
  uint32_t (*encode_method_f)(uint32_t data, uint32_t model, uint32_t stream_len,
78
            const struct encoder_setup *setup); /**< pointer to the encoding function */
79
  uint32_t *bitstream_adr; /**< start address of the compressed data bitstream */
80
  uint32_t max_stream_len; /**< maximum length of the bitstream in bits */
81
  uint32_t encoder_par1;   /**< encoding parameter 1 */
82
  uint32_t encoder_par2;   /**< encoding parameter 2 */
83
  uint32_t spillover_par;  /**< outlier parameter */
84
  uint32_t lossy_par;      /**< lossy compression parameter */
85
  uint32_t max_data_bits;  /**< how many bits are needed to represent the highest possible value */
86
};
87
88
89
/**
90
 * @brief map a signed value into a positive value range
91
 *
92
 * @param value_to_map  signed value to map
93
 * @param max_data_bits how many bits are needed to represent the
94
 *      highest possible value
95
 *
96
 * @returns the positive mapped value
97
 */
98
99
static uint32_t map_to_pos(uint32_t value_to_map, unsigned int max_data_bits)
100
132k
{
101
132k
  uint32_t const mask = (~0U >> (32 - max_data_bits)); /* mask the used bits */
102
132k
  uint32_t result;
103
104
132k
  value_to_map &= mask;
105
132k
  if (value_to_map >> (max_data_bits - 1)) { /* check the leading signed bit */
106
57.2k
    value_to_map |= ~mask; /* convert to 32-bit signed integer */
107
    /* map negative values to uneven numbers */
108
57.2k
    result = (-value_to_map) * 2 - 1; /* possible integer overflow is intended */
109
74.7k
  } else {
110
    /* map positive values to even numbers */
111
74.7k
    result = value_to_map * 2; /* possible integer overflow is intended */
112
74.7k
  }
113
114
132k
  return result;
115
132k
}
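The function above is the usual zig-zag mapping of signed residuals onto non-negative integers: 0, -1, +1, -2, +2, ... become 0, 1, 2, 3, 4, ... A minimal sketch of the expected values (illustrative only, assuming it is compiled in the same translation unit as the static map_to_pos() above):

    #include <assert.h>

    static void map_to_pos_example(void)
    {
            /* positive residuals map to even numbers */
            assert(map_to_pos(0, 16) == 0);
            assert(map_to_pos(1, 16) == 2);
            /* negative residuals map to odd numbers */
            assert(map_to_pos((uint32_t)-1, 16) == 1);
            assert(map_to_pos((uint32_t)-3, 16) == 5);
    }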
116
117
118
/**
119
 * @brief put a value of up to 32 bits into a big-endian bitstream
120
 *
121
 * @param value     the value to put into the bitstream
122
 * @param n_bits    number of bits to put into the bitstream
123
 * @param bit_offset    bit index where the bits will be put, seen from
124
 *        the very beginning of the bitstream
125
 * @param bitstream_adr   this is the pointer to the beginning of the
126
 *        bitstream (can be NULL)
127
 * @param max_stream_len  maximum length of the bitstream in *bits*; is
128
 *        ignored if bitstream_adr is NULL
129
 *
130
 * @returns the length of the generated bitstream in bits on success or an error
131
 *          code (which can be tested with cmp_is_error()) in the event of an
132
 *          incorrect input or if the bitstream buffer is too small to put the
133
 *          value in the bitstream.
134
 */
135
136
static uint32_t put_n_bits32(uint32_t value, unsigned int n_bits, uint32_t bit_offset,
137
           uint32_t *bitstream_adr, unsigned int max_stream_len)
138
215k
{
139
  /*
140
   *                               UNSEGMENTED
141
   * |-----------|XXXXXX|---------------|--------------------------------|
142
   * |-bits_left-|n_bits|-------------------bits_right-------------------|
143
   * ^
144
   * local_adr
145
   *                               SEGMENTED
146
   * |-----------------------------|XXX|XXX|-----------------------------|
147
   * |----------bits_left----------|n_bits-|---------bits_right----------|
148
   */
149
215k
  uint32_t const bits_left = bit_offset & 0x1F;
150
215k
  uint32_t const bits_right = 64 - bits_left - n_bits;
151
215k
  uint32_t const shift_left = 32 - n_bits;
152
215k
  uint32_t const stream_len = n_bits + bit_offset; /* no check for overflow */
153
215k
  uint32_t *local_adr;
154
215k
  uint32_t mask, tmp;
155
156
  /* Leave in case of erroneous input */
157
215k
  RETURN_ERROR_IF((int)shift_left < 0, INT_DECODER, "cannot insert more than 32 bits into the bit stream");  /* check n_bits <= 32 */
158
159
215k
  if (n_bits == 0)
160
0
    return stream_len;
161
162
215k
  if (!bitstream_adr)  /* Do we need to write data to the bitstream? */
163
127k
    return stream_len;
164
165
  /* Check if the bitstream buffer is large enough */
166
87.3k
  if (stream_len > max_stream_len)
167
6.10k
    return CMP_ERROR(SMALL_BUFFER);
168
169
81.2k
  local_adr = bitstream_adr + (bit_offset >> 5);
170
171
  /* clear the destination with inverse mask */
172
81.2k
  mask = (0XFFFFFFFFU << shift_left) >> bits_left;
173
81.2k
  tmp = be32_to_cpu(*local_adr) & ~mask;
174
175
  /* put (the first part of) the value into the bitstream */
176
81.2k
  tmp |= (value << shift_left) >> bits_left;
177
81.2k
  *local_adr = cpu_to_be32(tmp);
178
179
  /* Do we need to split the value over two words (SEGMENTED case) */
180
81.2k
  if (bits_right < 32) {
181
42.4k
    local_adr++;  /* adjust address */
182
183
    /* clear the destination */
184
42.4k
    mask = 0XFFFFFFFFU << bits_right;
185
42.4k
    tmp = be32_to_cpu(*local_adr) & ~mask;
186
187
    /* put the 2nd part of the value into the bitstream */
188
42.4k
    tmp |= value << bits_right;
189
42.4k
    *local_adr = cpu_to_be32(tmp);
190
42.4k
  }
191
81.2k
  return stream_len;
192
87.3k
}
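A minimal usage sketch (illustrative only, not part of the source): the returned bit length is fed back as the bit_offset of the next call, and errors are checked with cmp_is_error() before continuing.

    uint32_t buf[2] = {0}; /* 64-bit destination bitstream */
    uint32_t len = 0;

    len = put_n_bits32(0x5, 3, len, buf, 64);          /* 3 bits at offset 0 -> len == 3 */
    if (!cmp_is_error(len))
            len = put_n_bits32(0xAB, 8, len, buf, 64); /* 8 bits at offset 3 -> len == 11 */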
193
194
195
/**
196
 * @brief forms the codeword according to the Rice code
197
 *
198
 * @param value   value to be encoded (must be less than or equal to cmp_ima_max_spill(m))
199
 * @param m   Golomb parameter; only values of m that are a power of 2 are allowed
200
 *      maximum allowed Golomb parameter is 0x80000000
201
 * @param log2_m  Rice parameter, i.e. ilog_2(m), calculated outside the function
202
 *      for better performance
203
 * @param cw    address where the code word is stored
204
 *
205
 * @warning there is no check of the validity of the input parameters!
206
 * @returns the length of the formed code word in bits; the code word is invalid
207
 *  if the return value is greater than 32
208
 */
209
210
static uint32_t rice_encoder(uint32_t value, uint32_t m, uint32_t log2_m,
211
           uint32_t *cw)
212
43.3k
{
213
43.3k
  uint32_t const q = value >> log2_m;  /* quotient of value/m */
214
43.3k
  uint32_t const qc = (1U << q) - 1;   /* quotient code without ending zero */
215
216
43.3k
  uint32_t const r = value & (m-1);    /* remainder of value/m */
217
43.3k
  uint32_t const rl = log2_m + 1;      /* length of the remainder (+1 for the 0 in the quotient code) */
218
219
43.3k
  *cw = (qc << (rl & 0x1FU)) | r; /* put the quotient and remainder code together */
220
  /*
221
   * NOTE: If log2_m = 31 -> rl = 32, (q << rl) leads to an undefined
222
   * behavior. However, in this case, a valid code with a maximum of 32
223
   * bits can only be formed if q = 0 and qc = 0. To prevent undefined
224
   * behavior, the right shift operand is masked (& 0x1FU)
225
   */
226
227
43.3k
  return rl + q;  /* calculate the length of the code word */
228
43.3k
}
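A worked example (illustrative only) for the Rice coder above with m = 4, i.e. log2_m = 2:

    uint32_t cw;
    uint32_t cw_len = rice_encoder(9, 4, 2, &cw);
    /* q = 9 >> 2 = 2 -> unary quotient "11" plus terminating "0"
     * r = 9 & 3  = 1 -> remainder in log2_m = 2 bits: "01"
     * cw == 0x19 ("11001"), cw_len == 5 */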
229
230
231
/**
232
 * @brief forms a codeword according to the Golomb code
233
 *
234
 * @param value   value to be encoded (must be less than or equal to cmp_ima_max_spill(m))
235
 * @param m   Golomb parameter (must be greater than 0)
236
 * @param log2_m  ilog_2(m), calculated outside the function for better performance
237
 * @param cw    address where the code word is stored
238
 *
239
 * @warning there is no check of the validity of the input parameters!
240
 * @returns the length of the formed code word in bits; the code word is invalid
241
 *  if the return value is greater than 32
242
 */
243
244
static uint32_t golomb_encoder(uint32_t value, uint32_t m, uint32_t log2_m,
245
             uint32_t *cw)
246
88.6k
{
247
88.6k
  uint32_t len = log2_m + 1;  /* codeword length in group 0 */
248
88.6k
  uint32_t const cutoff = (0x2U << log2_m) - m;  /* members in group 0 */
249
250
88.6k
  if (value < cutoff) {  /* group 0 */
251
63.4k
    *cw = value;
252
63.4k
  } else {  /* other groups */
253
25.2k
    uint32_t const reg_mask = 0x1FU;  /* mask for the shift amount to prevent undefined behavior */
254
25.2k
    uint32_t const g = (value-cutoff) / m;  /* group number of same cw length */
255
25.2k
    uint32_t const r = (value-cutoff) - g * m; /* member in the group */
256
25.2k
    uint32_t const gc = (1U << (g & reg_mask)) - 1; /* prepare the left side in unary */
257
25.2k
    uint32_t const b = cutoff << 1;         /* form the base codeword */
258
259
25.2k
    *cw = gc << ((len+1) & reg_mask);  /* composed codeword part 1 */
260
25.2k
    *cw += b + r;                      /* composed codeword part 2 */
261
25.2k
    len += 1 + g;                      /* length of the codeword */
262
25.2k
  }
263
88.6k
  return len;
264
88.6k
}
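A worked example (illustrative only) for the Golomb coder above with m = 5, log2_m = ilog_2(5) = 2, hence cutoff = (0x2 << 2) - 5 = 3:

    uint32_t cw;
    uint32_t cw_len;

    cw_len = golomb_encoder(2, 5, 2, &cw); /* group 0: 2 < cutoff, cw == 0b010,  cw_len == 3 */
    cw_len = golomb_encoder(7, 5, 2, &cw); /* outside group 0:     cw == 0b1010, cw_len == 4 */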
265
266
267
/**
268
 * @brief generate a code word without an outlier mechanism and put it in the
269
 *  bitstream
270
 *
271
 * @param value   value to encode in the bitstream
272
 * @param stream_len  length of the bitstream in bits
273
 * @param setup   pointer to the encoder setup
274
 *
275
 * @returns the bit length of the bitstream on success or an error code if it
276
 *  fails (which can be tested with cmp_is_error())
277
 */
278
279
static uint32_t encode_normal(uint32_t value, uint32_t stream_len,
280
            const struct encoder_setup *setup)
281
132k
{
282
132k
  uint32_t code_word, cw_len;
283
284
132k
  cw_len = setup->generate_cw_f(value, setup->encoder_par1,
285
132k
              setup->encoder_par2, &code_word);
286
287
132k
  return put_n_bits32(code_word, cw_len, stream_len, setup->bitstream_adr,
288
132k
          setup->max_stream_len);
289
132k
}
290
291
292
/**
293
 * @brief subtracts the model from the data, encodes the result and puts it into the
294
 *  bitstream; outliers are encoded with the zero escape symbol mechanism
295
 *
296
 * @param data    data to encode
297
 * @param model   model of the data (0 if not used)
298
 * @param stream_len  length of the bitstream in bits
299
 * @param setup   pointer to the encoder setup
300
 *
301
 * @returns the bit length of the bitstream on success or an error code if it
302
 *  fails (which can be tested with cmp_is_error())
303
 *
304
 * @note no check if the data or model are in the allowed range
305
 * @note no check if the setup->spillover_par is in the allowed range
306
 */
307
308
static uint32_t encode_value_zero(uint32_t data, uint32_t model, uint32_t stream_len,
309
          const struct encoder_setup *setup)
310
90.8k
{
311
90.8k
  data -= model; /* possible underflow is intended */
312
313
90.8k
  data = map_to_pos(data, setup->max_data_bits);
314
315
  /* For performance reasons, we check to see if there is an outlier
316
   * before adding one, rather than the other way around:
317
   * data++;
318
   * if (data < setup->spillover_par && data != 0)
319
   *  return ...
320
   */
321
90.8k
  if (data < (setup->spillover_par - 1)) { /* detect non-outlier */
322
29.8k
    data++; /* add 1 to every value so we can use 0 as the escape symbol */
323
29.8k
    return encode_normal(data, stream_len, setup);
324
29.8k
  }
325
326
60.9k
  data++; /* add 1 to every value so we can use 0 as the escape symbol */
327
328
  /* use zero as escape symbol */
329
60.9k
  stream_len = encode_normal(0, stream_len, setup);
330
60.9k
  if (cmp_is_error(stream_len))
331
670
    return stream_len;
332
333
  /* put the data unencoded in the bitstream */
334
60.3k
  stream_len = put_n_bits32(data, setup->max_data_bits, stream_len,
335
60.3k
          setup->bitstream_adr, setup->max_stream_len);
336
60.3k
  return stream_len;
337
60.9k
}
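An illustrative trace of the zero escape mechanism (assumed, hypothetical parameters: spillover_par = 8, max_data_bits = 16):

    /*
     * mapped value  3 (non-outlier): encoded as the code word for 3 + 1 = 4
     * mapped value 20 (outlier)    : code word for the escape symbol 0,
     *                                followed by 20 + 1 = 21 as a raw
     *                                16-bit value in the bitstream
     */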
338
339
340
/**
341
 * @brief subtract the model from the data, encode the result and puts it into
342
 *  bitstream, for encoding outlier use the multi escape symbol mechanism
343
 *
344
 * @param data    data to encode
345
 * @param model   model of the data (0 if not used)
346
 * @param stream_len  length of the bitstream in bits
347
 * @param setup   pointer to the encoder setup
348
 *
349
 * @returns the bit length of the bitstream on success or an error code if it
350
 *  fails (which can be tested with cmp_is_error())
351
 *
352
 * @note no check if the data or model are in the allowed range
353
 * @note no check if the setup->spillover_par is in the allowed range
354
 */
355
356
static uint32_t encode_value_multi(uint32_t data, uint32_t model, uint32_t stream_len,
357
           const struct encoder_setup *setup)
358
41.1k
{
359
41.1k
  uint32_t unencoded_data;
360
41.1k
  unsigned int unencoded_data_len;
361
41.1k
  uint32_t escape_sym, escape_sym_offset;
362
363
41.1k
  data -= model; /* possible underflow is intended */
364
365
41.1k
  data = map_to_pos(data, setup->max_data_bits);
366
367
41.1k
  if (data < setup->spillover_par) /* detect non-outlier */
368
18.3k
    return  encode_normal(data, stream_len, setup);
369
370
  /*
371
   * In this mode we put the difference between the data and the spillover
372
   * threshold value (unencoded_data) after an encoded escape symbol, which
373
   * indicates that the next codeword is unencoded.
374
   * We use different escape symbols depending on the number of bits needed
375
   * for the unencoded data:
376
   * 0, 1, 2 bits needed for unencoded data -> escape symbol is spillover_par + 0
377
   * 3, 4 bits needed for unencoded data -> escape symbol is spillover_par + 1
378
   * 5, 6 bits needed for unencoded data -> escape symbol is spillover_par + 2
379
   * and so on
380
   */
381
22.8k
  unencoded_data = data - setup->spillover_par;
382
383
22.8k
  if (!unencoded_data) /* catch __builtin_clz(0) because the result is undefined.*/
384
69
    escape_sym_offset = 0;
385
22.7k
  else
386
22.7k
    escape_sym_offset = (31U - (uint32_t)__builtin_clz(unencoded_data)) >> 1;
387
388
22.8k
  escape_sym = setup->spillover_par + escape_sym_offset;
389
22.8k
  unencoded_data_len = (escape_sym_offset + 1U) << 1;
390
391
  /* put the escape symbol in the bitstream */
392
22.8k
  stream_len = encode_normal(escape_sym, stream_len, setup);
393
22.8k
  if (cmp_is_error(stream_len))
394
647
    return stream_len;
395
396
  /* put the unencoded data in the bitstream */
397
22.2k
  stream_len = put_n_bits32(unencoded_data, unencoded_data_len, stream_len,
398
22.2k
          setup->bitstream_adr, setup->max_stream_len);
399
22.2k
  return stream_len;
400
22.8k
}
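An illustrative trace of the multi escape mechanism (assumed, hypothetical parameter: spillover_par = 16):

    /*
     * mapped value 10 (non-outlier): encoded as the code word for 10
     * mapped value 20 (outlier)    : unencoded_data = 20 - 16 = 4 needs 3 bits,
     *                                so escape_sym_offset = 1; the escape symbol
     *                                16 + 1 = 17 is encoded, followed by the raw
     *                                value 4 in (1 + 1) << 1 = 4 bits
     */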
401
402
403
/**
404
 * @brief encodes the data with the model and the given setup and puts it into
405
 *  the bitstream
406
 *
407
 * @param data    data to encode
408
 * @param model   model of the data (0 if not used)
409
 * @param stream_len  length of the bitstream in bits
410
 * @param setup   pointer to the encoder setup
411
 *
412
 * @returns the bit length of the bitstream on success or an error code if it
413
 *  fails (which can be tested with cmp_is_error())
414
 */
415
416
static uint32_t encode_value(uint32_t data, uint32_t model, uint32_t stream_len,
417
           const struct encoder_setup *setup)
418
132k
{
419
132k
  uint32_t const mask = ~(0xFFFFFFFFU >> (32-setup->max_data_bits));
420
421
  /* lossy rounding of the data if lossy_par > 0 */
422
132k
  data = round_fwd(data, setup->lossy_par);
423
132k
  model = round_fwd(model, setup->lossy_par);
424
425
132k
  RETURN_ERROR_IF(data & mask || model & mask, DATA_VALUE_TOO_LARGE, "");
426
427
132k
  return setup->encode_method_f(data, model, stream_len, setup);
428
132k
}
429
430
431
/**
432
 * @brief calculate the maximum length of the bitstream in bits
433
 * @note we round down to the next 4-byte aligned address because we access the
434
 *  cmp_buffer in uint32_t words
435
 *
436
 * @param stream_size size of the bitstream in bytes
437
 *
438
 * @returns buffer size in bits
439
 */
440
441
static uint32_t cmp_stream_size_to_bits(uint32_t stream_size)
442
53.9k
{
443
53.9k
  return (stream_size & ~0x3U) * 8;
444
53.9k
}
445
446
447
/**
448
 * @brief configure an encoder setup structure for encoding a value
449
 *
450
 * @param setup   pointer to the encoder setup
451
 * @param cmp_par compression parameter
452
 * @param spillover  spillover (outlier) parameter
453
 * @param lossy_par lossy compression parameter
454
 * @param max_data_bits how many bits are needed to represent the highest possible value
455
 * @param cfg   pointer to the compression configuration structure
456
 *
457
 * @warning input parameters are not checked for validity
458
 */
459
460
static void configure_encoder_setup(struct encoder_setup *setup,
461
            uint32_t cmp_par, uint32_t spillover,
462
            uint32_t lossy_par, uint32_t max_data_bits,
463
            const struct cmp_cfg *cfg)
464
42.5k
{
465
42.5k
  memset(setup, 0, sizeof(struct encoder_setup));
466
467
42.5k
  setup->encoder_par1 = cmp_par;
468
42.5k
  setup->max_data_bits = max_data_bits;
469
42.5k
  setup->lossy_par = lossy_par;
470
42.5k
  setup->bitstream_adr = cfg->dst;
471
42.5k
  setup->max_stream_len = cmp_stream_size_to_bits(cfg->stream_size);
472
42.5k
  setup->encoder_par2 = ilog_2(cmp_par);
473
42.5k
  setup->spillover_par = spillover;
474
475
  /* if encoder_par1 is a power of two we can use the faster rice_encoder */
476
42.5k
  if (is_a_pow_of_2(setup->encoder_par1))
477
13.3k
    setup->generate_cw_f = &rice_encoder;
478
29.1k
  else
479
29.1k
    setup->generate_cw_f = &golomb_encoder;
480
481
  /* CMP_MODE_RAW is already handled before */
482
42.5k
  if (cfg->cmp_mode == CMP_MODE_MODEL_ZERO ||
483
42.5k
      cfg->cmp_mode == CMP_MODE_DIFF_ZERO)
484
25.7k
    setup->encode_method_f = &encode_value_zero;
485
16.8k
  else
486
16.8k
    setup->encode_method_f = &encode_value_multi;
487
42.5k
}
488
489
490
/**
491
 * @brief compress imagette data
492
 *
493
 * @param cfg   pointer to the compression configuration structure
494
 * @param stream_len  already used length of the bitstream in bits
495
 *
496
 * @returns the bit length of the bitstream on success or an error code if it
497
 *  fails (which can be tested with cmp_is_error())
498
 */
499
500
static uint32_t compress_imagette(const struct cmp_cfg *cfg, uint32_t stream_len)
501
4.33k
{
502
4.33k
  size_t i;
503
4.33k
  struct encoder_setup setup;
504
4.33k
  uint32_t max_data_bits;
505
506
4.33k
  const uint16_t *data_buf = cfg->src;
507
4.33k
  const uint16_t *model_buf = cfg->model_buf;
508
4.33k
  uint16_t model = 0;
509
4.33k
  const uint16_t *next_model_p = data_buf;
510
4.33k
  uint16_t *up_model_buf = NULL;
511
512
4.33k
  if (model_mode_is_used(cfg->cmp_mode)) {
513
3.14k
    model = get_unaligned(&model_buf[0]);
514
3.14k
    next_model_p = &model_buf[1];
515
3.14k
    up_model_buf = cfg->updated_model_buf;
516
3.14k
  }
517
518
4.33k
  if (cfg->data_type == DATA_TYPE_F_CAM_IMAGETTE ||
519
4.33k
      cfg->data_type == DATA_TYPE_F_CAM_IMAGETTE_ADAPTIVE) {
520
105
    max_data_bits = MAX_USED_BITS.fc_imagette;
521
4.22k
  } else if (cfg->data_type == DATA_TYPE_SAT_IMAGETTE ||
522
4.22k
       cfg->data_type == DATA_TYPE_SAT_IMAGETTE_ADAPTIVE) {
523
3.73k
    max_data_bits = MAX_USED_BITS.saturated_imagette;
524
3.73k
  } else { /* DATA_TYPE_IMAGETTE, DATA_TYPE_IMAGETTE_ADAPTIVE */
525
495
    max_data_bits = MAX_USED_BITS.nc_imagette;
526
495
  }
527
528
4.33k
  configure_encoder_setup(&setup, cfg->cmp_par_imagette,
529
4.33k
        cfg->spill_imagette, cfg->round, max_data_bits, cfg);
530
531
11.6k
  for (i = 0;; i++) {
532
11.6k
    stream_len = encode_value(get_unaligned(&data_buf[i]),
533
11.6k
            model, stream_len, &setup);
534
11.6k
    if (cmp_is_error(stream_len))
535
2.16k
      break;
536
537
9.44k
    if (up_model_buf) {
538
1.69k
      uint16_t data = get_unaligned(&data_buf[i]);
539
1.69k
      up_model_buf[i] = cmp_up_model(data, model, cfg->model_value,
540
1.69k
                   setup.lossy_par);
541
1.69k
    }
542
9.44k
    if (i >= cfg->samples-1)
543
2.17k
      break;
544
545
7.27k
    model = get_unaligned(&next_model_p[i]);
546
7.27k
  }
547
4.33k
  return stream_len;
548
4.33k
}
549
550
551
/**
552
 * @brief compress short normal light flux (S_FX) data
553
 *
554
 * @param cfg   pointer to the compression configuration structure
555
 * @param stream_len  already used length of the bitstream in bits
556
 *
557
 * @returns the bit length of the bitstream on success or an error code if it
558
 *  fails (which can be tested with cmp_is_error())
559
 */
560
561
static uint32_t compress_s_fx(const struct cmp_cfg *cfg, uint32_t stream_len)
562
708
{
563
708
  size_t i;
564
565
708
  const struct s_fx *data_buf = cfg->src;
566
708
  const struct s_fx *model_buf = cfg->model_buf;
567
708
  struct s_fx *up_model_buf = NULL;
568
708
  const struct s_fx *next_model_p;
569
708
  struct s_fx model;
570
708
  struct encoder_setup setup_exp_flag, setup_fx;
571
572
708
  if (model_mode_is_used(cfg->cmp_mode)) {
573
351
    model = model_buf[0];
574
351
    next_model_p = &model_buf[1];
575
351
    up_model_buf = cfg->updated_model_buf;
576
357
  } else {
577
357
    memset(&model, 0, sizeof(model));
578
357
    next_model_p = data_buf;
579
357
  }
580
581
708
  configure_encoder_setup(&setup_exp_flag, cfg->cmp_par_exp_flags, cfg->spill_exp_flags,
582
708
        cfg->round, MAX_USED_BITS.s_exp_flags, cfg);
583
708
  configure_encoder_setup(&setup_fx, cfg->cmp_par_fx, cfg->spill_fx,
584
708
        cfg->round, MAX_USED_BITS.s_fx, cfg);
585
586
2.12k
  for (i = 0;; i++) {
587
2.12k
    stream_len = encode_value(data_buf[i].exp_flags, model.exp_flags,
588
2.12k
            stream_len, &setup_exp_flag);
589
2.12k
    if (cmp_is_error(stream_len))
590
79
      break;
591
2.04k
    stream_len = encode_value(data_buf[i].fx, model.fx, stream_len,
592
2.04k
            &setup_fx);
593
2.04k
    if (cmp_is_error(stream_len))
594
195
      break;
595
596
1.85k
    if (up_model_buf) {
597
475
      up_model_buf[i].exp_flags = cmp_up_model(data_buf[i].exp_flags, model.exp_flags,
598
475
                 cfg->model_value, setup_exp_flag.lossy_par);
599
475
      up_model_buf[i].fx = cmp_up_model(data_buf[i].fx, model.fx,
600
475
                cfg->model_value, setup_fx.lossy_par);
601
475
    }
602
603
1.85k
    if (i >= cfg->samples-1)
604
434
      break;
605
606
1.41k
    model = next_model_p[i];
607
1.41k
  }
608
708
  return stream_len;
609
708
}
610
611
612
/**
613
 * @brief compress S_FX_EFX data
614
 *
615
 * @param cfg   pointer to the compression configuration structure
616
 * @param stream_len  already used length of the bitstream in bits
617
 *
618
 * @returns the bit length of the bitstream on success or an error code if it
619
 *  fails (which can be tested with cmp_is_error())
620
 */
621
622
static uint32_t compress_s_fx_efx(const struct cmp_cfg *cfg, uint32_t stream_len)
623
1.48k
{
624
1.48k
  size_t i;
625
626
1.48k
  const struct s_fx_efx *data_buf = cfg->src;
627
1.48k
  const struct s_fx_efx *model_buf = cfg->model_buf;
628
1.48k
  struct s_fx_efx *up_model_buf = NULL;
629
1.48k
  const struct s_fx_efx *next_model_p;
630
1.48k
  struct s_fx_efx model;
631
1.48k
  struct encoder_setup setup_exp_flag, setup_fx, setup_efx;
632
633
1.48k
  if (model_mode_is_used(cfg->cmp_mode)) {
634
589
    model = model_buf[0];
635
589
    next_model_p = &model_buf[1];
636
589
    up_model_buf = cfg->updated_model_buf;
637
895
  } else {
638
895
    memset(&model, 0, sizeof(model));
639
895
    next_model_p = data_buf;
640
895
  }
641
642
1.48k
  configure_encoder_setup(&setup_exp_flag, cfg->cmp_par_exp_flags, cfg->spill_exp_flags,
643
1.48k
        cfg->round, MAX_USED_BITS.s_exp_flags, cfg);
644
1.48k
  configure_encoder_setup(&setup_fx, cfg->cmp_par_fx, cfg->spill_fx,
645
1.48k
        cfg->round, MAX_USED_BITS.s_fx, cfg);
646
1.48k
  configure_encoder_setup(&setup_efx, cfg->cmp_par_efx, cfg->spill_efx,
647
1.48k
        cfg->round, MAX_USED_BITS.s_efx, cfg);
648
649
2.08k
  for (i = 0;; i++) {
650
2.08k
    stream_len = encode_value(data_buf[i].exp_flags, model.exp_flags,
651
2.08k
            stream_len, &setup_exp_flag);
652
2.08k
    if (cmp_is_error(stream_len))
653
21
      break;
654
2.06k
    stream_len = encode_value(data_buf[i].fx, model.fx, stream_len,
655
2.06k
            &setup_fx);
656
2.06k
    if (cmp_is_error(stream_len))
657
138
      break;
658
1.92k
    stream_len = encode_value(data_buf[i].efx, model.efx,
659
1.92k
            stream_len, &setup_efx);
660
1.92k
    if (cmp_is_error(stream_len))
661
140
      return stream_len;
662
663
1.78k
    if (up_model_buf) {
664
213
      up_model_buf[i].exp_flags = cmp_up_model(data_buf[i].exp_flags, model.exp_flags,
665
213
        cfg->model_value, setup_exp_flag.lossy_par);
666
213
      up_model_buf[i].fx = cmp_up_model(data_buf[i].fx, model.fx,
667
213
        cfg->model_value, setup_fx.lossy_par);
668
213
      up_model_buf[i].efx = cmp_up_model(data_buf[i].efx, model.efx,
669
213
        cfg->model_value, setup_efx.lossy_par);
670
213
    }
671
672
1.78k
    if (i >= cfg->samples-1)
673
1.18k
      break;
674
675
603
    model = next_model_p[i];
676
603
  }
677
1.34k
  return stream_len;
678
1.48k
}
679
680
681
/**
682
 * @brief compress S_FX_NCOB data
683
 *
684
 * @param cfg   pointer to the compression configuration structure
685
 * @param stream_len  already used length of the bitstream in bits
686
 *
687
 * @returns the bit length of the bitstream on success or an error code if it
688
 *  fails (which can be tested with cmp_is_error())
689
 */
690
691
static uint32_t compress_s_fx_ncob(const struct cmp_cfg *cfg, uint32_t stream_len)
692
1.63k
{
693
1.63k
  size_t i;
694
695
1.63k
  const struct s_fx_ncob *data_buf = cfg->src;
696
1.63k
  const struct s_fx_ncob *model_buf = cfg->model_buf;
697
1.63k
  struct s_fx_ncob *up_model_buf = NULL;
698
1.63k
  const struct s_fx_ncob *next_model_p;
699
1.63k
  struct s_fx_ncob model;
700
1.63k
  struct encoder_setup setup_exp_flag, setup_fx, setup_ncob;
701
702
1.63k
  if (model_mode_is_used(cfg->cmp_mode)) {
703
712
    model = model_buf[0];
704
712
    next_model_p = &model_buf[1];
705
712
    up_model_buf = cfg->updated_model_buf;
706
925
  } else {
707
925
    memset(&model, 0, sizeof(model));
708
925
    next_model_p = data_buf;
709
925
  }
710
711
1.63k
  configure_encoder_setup(&setup_exp_flag, cfg->cmp_par_exp_flags, cfg->spill_exp_flags,
712
1.63k
        cfg->round, MAX_USED_BITS.s_exp_flags, cfg);
713
1.63k
  configure_encoder_setup(&setup_fx, cfg->cmp_par_fx, cfg->spill_fx,
714
1.63k
        cfg->round, MAX_USED_BITS.s_fx, cfg);
715
1.63k
  configure_encoder_setup(&setup_ncob, cfg->cmp_par_ncob, cfg->spill_ncob,
716
1.63k
        cfg->round, MAX_USED_BITS.s_ncob, cfg);
717
718
2.62k
  for (i = 0;; i++) {
719
2.62k
    stream_len = encode_value(data_buf[i].exp_flags, model.exp_flags,
720
2.62k
            stream_len, &setup_exp_flag);
721
2.62k
    if (cmp_is_error(stream_len))
722
32
      break;
723
2.59k
    stream_len = encode_value(data_buf[i].fx, model.fx, stream_len,
724
2.59k
            &setup_fx);
725
2.59k
    if (cmp_is_error(stream_len))
726
57
      break;
727
2.53k
    stream_len = encode_value(data_buf[i].ncob_x, model.ncob_x,
728
2.53k
            stream_len, &setup_ncob);
729
2.53k
    if (cmp_is_error(stream_len))
730
85
      break;
731
2.45k
    stream_len = encode_value(data_buf[i].ncob_y, model.ncob_y,
732
2.45k
            stream_len, &setup_ncob);
733
2.45k
    if (cmp_is_error(stream_len))
734
87
      break;
735
736
2.36k
    if (up_model_buf) {
737
647
      up_model_buf[i].exp_flags = cmp_up_model(data_buf[i].exp_flags, model.exp_flags,
738
647
        cfg->model_value, setup_exp_flag.lossy_par);
739
647
      up_model_buf[i].fx = cmp_up_model(data_buf[i].fx, model.fx,
740
647
        cfg->model_value, setup_fx.lossy_par);
741
647
      up_model_buf[i].ncob_x = cmp_up_model(data_buf[i].ncob_x, model.ncob_x,
742
647
        cfg->model_value, setup_ncob.lossy_par);
743
647
      up_model_buf[i].ncob_y = cmp_up_model(data_buf[i].ncob_y, model.ncob_y,
744
647
        cfg->model_value, setup_ncob.lossy_par);
745
647
    }
746
747
2.36k
    if (i >= cfg->samples-1)
748
1.37k
      break;
749
750
989
    model = next_model_p[i];
751
989
  }
752
1.63k
  return stream_len;
753
1.63k
}
754
755
756
/**
757
 * @brief compress S_FX_EFX_NCOB_ECOB data
758
 *
759
 * @param cfg   pointer to the compression configuration structure
760
 * @param stream_len  already used length of the bitstream in bits
761
 *
762
 * @returns the bit length of the bitstream on success or an error code if it
763
 *  fails (which can be tested with cmp_is_error())
764
 */
765
766
static uint32_t compress_s_fx_efx_ncob_ecob(const struct cmp_cfg *cfg, uint32_t stream_len)
767
857
{
768
857
  size_t i;
769
770
857
  const struct s_fx_efx_ncob_ecob *data_buf = cfg->src;
771
857
  const struct s_fx_efx_ncob_ecob *model_buf = cfg->model_buf;
772
857
  struct s_fx_efx_ncob_ecob *up_model_buf = NULL;
773
857
  const struct s_fx_efx_ncob_ecob *next_model_p;
774
857
  struct s_fx_efx_ncob_ecob model;
775
857
  struct encoder_setup setup_exp_flag, setup_fx, setup_ncob, setup_efx,
776
857
            setup_ecob;
777
778
857
  if (model_mode_is_used(cfg->cmp_mode)) {
779
179
    model = model_buf[0];
780
179
    next_model_p = &model_buf[1];
781
179
    up_model_buf = cfg->updated_model_buf;
782
678
  } else {
783
678
    memset(&model, 0, sizeof(model));
784
678
    next_model_p = data_buf;
785
678
  }
786
787
857
  configure_encoder_setup(&setup_exp_flag, cfg->cmp_par_exp_flags, cfg->spill_exp_flags,
788
857
        cfg->round, MAX_USED_BITS.s_exp_flags, cfg);
789
857
  configure_encoder_setup(&setup_fx, cfg->cmp_par_fx, cfg->spill_fx,
790
857
        cfg->round, MAX_USED_BITS.s_fx, cfg);
791
857
  configure_encoder_setup(&setup_ncob, cfg->cmp_par_ncob, cfg->spill_ncob,
792
857
        cfg->round, MAX_USED_BITS.s_ncob, cfg);
793
857
  configure_encoder_setup(&setup_efx, cfg->cmp_par_efx, cfg->spill_efx,
794
857
        cfg->round, MAX_USED_BITS.s_efx, cfg);
795
857
  configure_encoder_setup(&setup_ecob, cfg->cmp_par_ecob, cfg->spill_ecob,
796
857
        cfg->round, MAX_USED_BITS.s_ecob, cfg);
797
798
1.93k
  for (i = 0;; i++) {
799
1.93k
    stream_len = encode_value(data_buf[i].exp_flags, model.exp_flags,
800
1.93k
            stream_len, &setup_exp_flag);
801
1.93k
    if (cmp_is_error(stream_len))
802
38
      break;
803
1.89k
    stream_len = encode_value(data_buf[i].fx, model.fx, stream_len,
804
1.89k
            &setup_fx);
805
1.89k
    if (cmp_is_error(stream_len))
806
51
      break;
807
1.84k
    stream_len = encode_value(data_buf[i].ncob_x, model.ncob_x,
808
1.84k
            stream_len, &setup_ncob);
809
1.84k
    if (cmp_is_error(stream_len))
810
82
      break;
811
1.76k
    stream_len = encode_value(data_buf[i].ncob_y, model.ncob_y,
812
1.76k
            stream_len, &setup_ncob);
813
1.76k
    if (cmp_is_error(stream_len))
814
79
      break;
815
1.68k
    stream_len = encode_value(data_buf[i].efx, model.efx,
816
1.68k
            stream_len, &setup_efx);
817
1.68k
    if (cmp_is_error(stream_len))
818
97
      break;
819
1.59k
    stream_len = encode_value(data_buf[i].ecob_x, model.ecob_x,
820
1.59k
            stream_len, &setup_ecob);
821
1.59k
    if (cmp_is_error(stream_len))
822
79
      break;
823
1.51k
    stream_len = encode_value(data_buf[i].ecob_y, model.ecob_y,
824
1.51k
            stream_len, &setup_ecob);
825
1.51k
    if (cmp_is_error(stream_len))
826
82
      break;
827
828
1.42k
    if (up_model_buf) {
829
161
      up_model_buf[i].exp_flags = cmp_up_model(data_buf[i].exp_flags, model.exp_flags,
830
161
        cfg->model_value, setup_exp_flag.lossy_par);
831
161
      up_model_buf[i].fx = cmp_up_model(data_buf[i].fx, model.fx,
832
161
        cfg->model_value, setup_fx.lossy_par);
833
161
      up_model_buf[i].ncob_x = cmp_up_model(data_buf[i].ncob_x, model.ncob_x,
834
161
        cfg->model_value, setup_ncob.lossy_par);
835
161
      up_model_buf[i].ncob_y = cmp_up_model(data_buf[i].ncob_y, model.ncob_y,
836
161
        cfg->model_value, setup_ncob.lossy_par);
837
161
      up_model_buf[i].efx = cmp_up_model(data_buf[i].efx, model.efx,
838
161
        cfg->model_value, setup_efx.lossy_par);
839
161
      up_model_buf[i].ecob_x = cmp_up_model(data_buf[i].ecob_x, model.ecob_x,
840
161
        cfg->model_value, setup_ecob.lossy_par);
841
161
      up_model_buf[i].ecob_y = cmp_up_model(data_buf[i].ecob_y, model.ecob_y,
842
161
        cfg->model_value, setup_ecob.lossy_par);
843
161
    }
844
845
1.42k
    if (i >= cfg->samples-1)
846
349
      break;
847
848
1.08k
    model = next_model_p[i];
849
1.08k
  }
850
857
  return stream_len;
851
857
}
852
853
854
/**
855
 * @brief compress L_FX data
856
 *
857
 * @param cfg   pointer to the compression configuration structure
858
 * @param stream_len  already used length of the bitstream in bits
859
 *
860
 * @returns the bit length of the bitstream on success or an error code if it
861
 *  fails (which can be tested with cmp_is_error())
862
 */
863
864
static uint32_t compress_l_fx(const struct cmp_cfg *cfg, uint32_t stream_len)
865
610
{
866
610
  size_t i;
867
868
610
  const struct l_fx *data_buf = cfg->src;
869
610
  const struct l_fx *model_buf = cfg->model_buf;
870
610
  struct l_fx *up_model_buf = NULL;
871
610
  const struct l_fx *next_model_p;
872
610
  struct l_fx model;
873
610
  struct encoder_setup setup_exp_flag, setup_fx, setup_fx_var;
874
875
610
  if (model_mode_is_used(cfg->cmp_mode)) {
876
194
    model = model_buf[0];
877
194
    next_model_p = &model_buf[1];
878
194
    up_model_buf = cfg->updated_model_buf;
879
416
  } else {
880
416
    memset(&model, 0, sizeof(model));
881
416
    next_model_p = data_buf;
882
416
  }
883
884
610
  configure_encoder_setup(&setup_exp_flag, cfg->cmp_par_exp_flags, cfg->spill_exp_flags,
885
610
        cfg->round, MAX_USED_BITS.l_exp_flags, cfg);
886
610
  configure_encoder_setup(&setup_fx, cfg->cmp_par_fx, cfg->spill_fx,
887
610
        cfg->round, MAX_USED_BITS.l_fx, cfg);
888
610
  configure_encoder_setup(&setup_fx_var, cfg->cmp_par_fx_cob_variance, cfg->spill_fx_cob_variance,
889
610
        cfg->round, MAX_USED_BITS.l_fx_cob_variance, cfg);
890
891
2.06k
  for (i = 0;; i++) {
892
2.06k
    stream_len = encode_value(data_buf[i].exp_flags, model.exp_flags,
893
2.06k
            stream_len, &setup_exp_flag);
894
2.06k
    if (cmp_is_error(stream_len))
895
84
      break;
896
1.98k
    stream_len = encode_value(data_buf[i].fx, model.fx, stream_len,
897
1.98k
            &setup_fx);
898
1.98k
    if (cmp_is_error(stream_len))
899
71
      break;
900
1.91k
    stream_len = encode_value(data_buf[i].fx_variance, model.fx_variance,
901
1.91k
            stream_len, &setup_fx_var);
902
1.91k
    if (cmp_is_error(stream_len))
903
81
      break;
904
905
1.83k
    if (up_model_buf) {
906
390
      up_model_buf[i].exp_flags = cmp_up_model32(data_buf[i].exp_flags, model.exp_flags,
907
390
        cfg->model_value, setup_exp_flag.lossy_par);
908
390
      up_model_buf[i].fx = cmp_up_model(data_buf[i].fx, model.fx,
909
390
        cfg->model_value, setup_fx.lossy_par);
910
390
      up_model_buf[i].fx_variance = cmp_up_model(data_buf[i].fx_variance, model.fx_variance,
911
390
        cfg->model_value, setup_fx_var.lossy_par);
912
390
    }
913
914
1.83k
    if (i >= cfg->samples-1)
915
374
      break;
916
917
1.45k
    model = next_model_p[i];
918
1.45k
  }
919
610
  return stream_len;
920
610
}
921
922
923
/**
924
 * @brief compress L_FX_EFX data
925
 *
926
 * @param cfg   pointer to the compression configuration structure
927
 * @param stream_len  already used length of the bitstream in bits
928
 *
929
 * @returns the bit length of the bitstream on success or an error code if it
930
 *  fails (which can be tested with cmp_is_error())
931
 */
932
933
static uint32_t compress_l_fx_efx(const struct cmp_cfg *cfg, uint32_t stream_len)
934
512
{
935
512
  size_t i;
936
937
512
  const struct l_fx_efx *data_buf = cfg->src;
938
512
  const struct l_fx_efx *model_buf = cfg->model_buf;
939
512
  struct l_fx_efx *up_model_buf = NULL;
940
512
  const struct l_fx_efx *next_model_p;
941
512
  struct l_fx_efx model;
942
512
  struct encoder_setup setup_exp_flag, setup_fx, setup_efx, setup_fx_var;
943
944
512
  if (model_mode_is_used(cfg->cmp_mode)) {
945
64
    model = model_buf[0];
946
64
    next_model_p = &model_buf[1];
947
64
    up_model_buf = cfg->updated_model_buf;
948
448
  } else {
949
448
    memset(&model, 0, sizeof(model));
950
448
    next_model_p = data_buf;
951
448
  }
952
953
512
  configure_encoder_setup(&setup_exp_flag, cfg->cmp_par_exp_flags, cfg->spill_exp_flags,
954
512
        cfg->round, MAX_USED_BITS.l_exp_flags, cfg);
955
512
  configure_encoder_setup(&setup_fx, cfg->cmp_par_fx, cfg->spill_fx,
956
512
        cfg->round, MAX_USED_BITS.l_fx, cfg);
957
512
  configure_encoder_setup(&setup_efx, cfg->cmp_par_efx, cfg->spill_efx,
958
512
        cfg->round, MAX_USED_BITS.l_efx, cfg);
959
512
  configure_encoder_setup(&setup_fx_var, cfg->cmp_par_fx_cob_variance, cfg->spill_fx_cob_variance,
960
512
        cfg->round, MAX_USED_BITS.l_fx_cob_variance, cfg);
961
962
4.63k
  for (i = 0;; i++) {
963
4.63k
    stream_len = encode_value(data_buf[i].exp_flags, model.exp_flags,
964
4.63k
            stream_len, &setup_exp_flag);
965
4.63k
    if (cmp_is_error(stream_len))
966
23
      break;
967
4.61k
    stream_len = encode_value(data_buf[i].fx, model.fx, stream_len,
968
4.61k
            &setup_fx);
969
4.61k
    if (cmp_is_error(stream_len))
970
43
      break;
971
4.57k
    stream_len = encode_value(data_buf[i].efx, model.efx,
972
4.57k
            stream_len, &setup_efx);
973
4.57k
    if (cmp_is_error(stream_len))
974
54
      break;
975
4.51k
    stream_len = encode_value(data_buf[i].fx_variance, model.fx_variance,
976
4.51k
            stream_len, &setup_fx_var);
977
4.51k
    if (cmp_is_error(stream_len))
978
51
      break;
979
980
4.46k
    if (up_model_buf) {
981
225
      up_model_buf[i].exp_flags = cmp_up_model32(data_buf[i].exp_flags, model.exp_flags,
982
225
        cfg->model_value, setup_exp_flag.lossy_par);
983
225
      up_model_buf[i].fx = cmp_up_model(data_buf[i].fx, model.fx,
984
225
        cfg->model_value, setup_fx.lossy_par);
985
225
      up_model_buf[i].efx = cmp_up_model(data_buf[i].efx, model.efx,
986
225
        cfg->model_value, setup_efx.lossy_par);
987
225
      up_model_buf[i].fx_variance = cmp_up_model(data_buf[i].fx_variance, model.fx_variance,
988
225
        cfg->model_value, setup_fx_var.lossy_par);
989
225
    }
990
991
4.46k
    if (i >= cfg->samples-1)
992
341
      break;
993
994
4.12k
    model = next_model_p[i];
995
4.12k
  }
996
512
  return stream_len;
997
512
}
998
999
1000
/**
1001
 * @brief compress L_FX_NCOB data
1002
 *
1003
 * @param cfg   pointer to the compression configuration structure
1004
 * @param stream_len  already used length of the bitstream in bits
1005
 *
1006
 * @returns the bit length of the bitstream on success or an error code if it
1007
 *  fails (which can be tested with cmp_is_error())
1008
 */
1009
1010
static uint32_t compress_l_fx_ncob(const struct cmp_cfg *cfg, uint32_t stream_len)
1011
478
{
1012
478
  size_t i;
1013
1014
478
  const struct l_fx_ncob *data_buf = cfg->src;
1015
478
  const struct l_fx_ncob *model_buf = cfg->model_buf;
1016
478
  struct l_fx_ncob *up_model_buf = NULL;
1017
478
  const struct l_fx_ncob *next_model_p;
1018
478
  struct l_fx_ncob model;
1019
478
  struct encoder_setup setup_exp_flag, setup_fx, setup_ncob,
1020
478
            setup_fx_var, setup_cob_var;
1021
1022
478
  if (model_mode_is_used(cfg->cmp_mode)) {
1023
53
    model = model_buf[0];
1024
53
    next_model_p = &model_buf[1];
1025
53
    up_model_buf = cfg->updated_model_buf;
1026
425
  } else {
1027
425
    memset(&model, 0, sizeof(model));
1028
425
    next_model_p = data_buf;
1029
425
  }
1030
1031
478
  configure_encoder_setup(&setup_exp_flag, cfg->cmp_par_exp_flags, cfg->spill_exp_flags,
1032
478
        cfg->round, MAX_USED_BITS.l_exp_flags, cfg);
1033
478
  configure_encoder_setup(&setup_fx, cfg->cmp_par_fx, cfg->spill_fx,
1034
478
        cfg->round, MAX_USED_BITS.l_fx, cfg);
1035
478
  configure_encoder_setup(&setup_ncob, cfg->cmp_par_ncob, cfg->spill_ncob,
1036
478
        cfg->round, MAX_USED_BITS.l_ncob, cfg);
1037
  /* we use the cmp_par_fx_cob_variance parameter for fx and cob variance data */
1038
478
  configure_encoder_setup(&setup_fx_var, cfg->cmp_par_fx_cob_variance, cfg->spill_fx_cob_variance,
1039
478
        cfg->round, MAX_USED_BITS.l_fx_cob_variance, cfg);
1040
478
  configure_encoder_setup(&setup_cob_var, cfg->cmp_par_fx_cob_variance, cfg->spill_fx_cob_variance,
1041
478
        cfg->round, MAX_USED_BITS.l_fx_cob_variance, cfg);
1042
1043
3.15k
  for (i = 0;; i++) {
1044
3.15k
    stream_len = encode_value(data_buf[i].exp_flags, model.exp_flags,
1045
3.15k
            stream_len, &setup_exp_flag);
1046
3.15k
    if (cmp_is_error(stream_len))
1047
39
      break;
1048
3.11k
    stream_len = encode_value(data_buf[i].fx, model.fx, stream_len,
1049
3.11k
            &setup_fx);
1050
3.11k
    if (cmp_is_error(stream_len))
1051
45
      break;
1052
3.06k
    stream_len = encode_value(data_buf[i].ncob_x, model.ncob_x,
1053
3.06k
            stream_len, &setup_ncob);
1054
3.06k
    if (cmp_is_error(stream_len))
1055
40
      break;
1056
3.02k
    stream_len = encode_value(data_buf[i].ncob_y, model.ncob_y,
1057
3.02k
            stream_len, &setup_ncob);
1058
3.02k
    if (cmp_is_error(stream_len))
1059
36
      break;
1060
2.99k
    stream_len = encode_value(data_buf[i].fx_variance, model.fx_variance,
1061
2.99k
            stream_len, &setup_fx_var);
1062
2.99k
    if (cmp_is_error(stream_len))
1063
44
      break;
1064
2.94k
    stream_len = encode_value(data_buf[i].cob_x_variance, model.cob_x_variance,
1065
2.94k
            stream_len, &setup_cob_var);
1066
2.94k
    if (cmp_is_error(stream_len))
1067
35
      break;
1068
2.91k
    stream_len = encode_value(data_buf[i].cob_y_variance, model.cob_y_variance,
1069
2.91k
            stream_len, &setup_cob_var);
1070
2.91k
    if (cmp_is_error(stream_len))
1071
40
      break;
1072
1073
2.87k
    if (up_model_buf) {
1074
250
      up_model_buf[i].exp_flags = cmp_up_model32(data_buf[i].exp_flags, model.exp_flags,
1075
250
        cfg->model_value, setup_exp_flag.lossy_par);
1076
250
      up_model_buf[i].fx = cmp_up_model(data_buf[i].fx, model.fx,
1077
250
        cfg->model_value, setup_fx.lossy_par);
1078
250
      up_model_buf[i].ncob_x = cmp_up_model(data_buf[i].ncob_x, model.ncob_x,
1079
250
        cfg->model_value, setup_ncob.lossy_par);
1080
250
      up_model_buf[i].ncob_y = cmp_up_model(data_buf[i].ncob_y, model.ncob_y,
1081
250
        cfg->model_value, setup_ncob.lossy_par);
1082
250
      up_model_buf[i].fx_variance = cmp_up_model(data_buf[i].fx_variance, model.fx_variance,
1083
250
        cfg->model_value, setup_fx_var.lossy_par);
1084
250
      up_model_buf[i].cob_x_variance = cmp_up_model(data_buf[i].cob_x_variance, model.cob_x_variance,
1085
250
        cfg->model_value, setup_cob_var.lossy_par);
1086
250
      up_model_buf[i].cob_y_variance = cmp_up_model(data_buf[i].cob_y_variance, model.cob_y_variance,
1087
250
        cfg->model_value, setup_cob_var.lossy_par);
1088
250
    }
1089
1090
2.87k
    if (i >= cfg->samples-1)
1091
199
      break;
1092
1093
2.67k
    model = next_model_p[i];
1094
2.67k
  }
1095
478
  return stream_len;
1096
478
}
1097
1098
1099
/**
1100
 * @brief compress L_FX_EFX_NCOB_ECOB data
1101
 *
1102
 * @param cfg   pointer to the compression configuration structure
1103
 * @param stream_len  already used length of the bitstream in bits
1104
 *
1105
 * @returns the bit length of the bitstream on success or an error code if it
1106
 *  fails (which can be tested with cmp_is_error())
1107
 */
1108
1109
static uint32_t compress_l_fx_efx_ncob_ecob(const struct cmp_cfg *cfg, uint32_t stream_len)
1110
560
{
1111
560
  size_t i;
1112
1113
560
  const struct l_fx_efx_ncob_ecob *data_buf = cfg->src;
1114
560
  const struct l_fx_efx_ncob_ecob *model_buf = cfg->model_buf;
1115
560
  struct l_fx_efx_ncob_ecob *up_model_buf = NULL;
1116
560
  const struct l_fx_efx_ncob_ecob *next_model_p;
1117
560
  struct l_fx_efx_ncob_ecob model;
1118
560
  struct encoder_setup setup_exp_flag, setup_fx, setup_ncob, setup_efx,
1119
560
            setup_ecob, setup_fx_var, setup_cob_var;
1120
1121
560
  if (model_mode_is_used(cfg->cmp_mode)) {
1122
245
    model = model_buf[0];
1123
245
    next_model_p = &model_buf[1];
1124
245
    up_model_buf = cfg->updated_model_buf;
1125
315
  } else {
1126
315
    memset(&model, 0, sizeof(model));
1127
315
    next_model_p = data_buf;
1128
315
  }
1129
1130
560
  configure_encoder_setup(&setup_exp_flag, cfg->cmp_par_exp_flags, cfg->spill_exp_flags,
1131
560
        cfg->round, MAX_USED_BITS.l_exp_flags, cfg);
1132
560
  configure_encoder_setup(&setup_fx, cfg->cmp_par_fx, cfg->spill_fx,
1133
560
        cfg->round, MAX_USED_BITS.l_fx, cfg);
1134
560
  configure_encoder_setup(&setup_ncob, cfg->cmp_par_ncob, cfg->spill_ncob,
1135
560
        cfg->round, MAX_USED_BITS.l_ncob, cfg);
1136
560
  configure_encoder_setup(&setup_efx, cfg->cmp_par_efx, cfg->spill_efx,
1137
560
        cfg->round, MAX_USED_BITS.l_efx, cfg);
1138
560
  configure_encoder_setup(&setup_ecob, cfg->cmp_par_ecob, cfg->spill_ecob,
1139
560
        cfg->round, MAX_USED_BITS.l_ecob, cfg);
1140
  /* we use the same compression parameters for both variance data fields */
1141
560
  configure_encoder_setup(&setup_fx_var, cfg->cmp_par_fx_cob_variance, cfg->spill_fx_cob_variance,
1142
560
        cfg->round, MAX_USED_BITS.l_fx_cob_variance, cfg);
1143
560
  configure_encoder_setup(&setup_cob_var, cfg->cmp_par_fx_cob_variance, cfg->spill_fx_cob_variance,
1144
560
        cfg->round, MAX_USED_BITS.l_fx_cob_variance, cfg);
1145
1146
2.11k
  for (i = 0;; i++) {
1147
2.11k
    stream_len = encode_value(data_buf[i].exp_flags, model.exp_flags,
1148
2.11k
            stream_len, &setup_exp_flag);
1149
2.11k
    if (cmp_is_error(stream_len))
1150
20
      break;
1151
2.09k
    stream_len = encode_value(data_buf[i].fx, model.fx, stream_len,
1152
2.09k
            &setup_fx);
1153
2.09k
    if (cmp_is_error(stream_len))
1154
20
      break;
1155
2.07k
    stream_len = encode_value(data_buf[i].ncob_x, model.ncob_x,
1156
2.07k
            stream_len, &setup_ncob);
1157
2.07k
    if (cmp_is_error(stream_len))
1158
34
      break;
1159
2.03k
    stream_len = encode_value(data_buf[i].ncob_y, model.ncob_y,
1160
2.03k
            stream_len, &setup_ncob);
1161
2.03k
    if (cmp_is_error(stream_len))
1162
34
      break;
1163
2.00k
    stream_len = encode_value(data_buf[i].efx, model.efx,
1164
2.00k
            stream_len, &setup_efx);
1165
2.00k
    if (cmp_is_error(stream_len))
1166
36
      break;
1167
1.96k
    stream_len = encode_value(data_buf[i].ecob_x, model.ecob_x,
1168
1.96k
            stream_len, &setup_ecob);
1169
1.96k
    if (cmp_is_error(stream_len))
1170
38
      break;
1171
1.92k
    stream_len = encode_value(data_buf[i].ecob_y, model.ecob_y,
1172
1.92k
            stream_len, &setup_ecob);
1173
1.92k
    if (cmp_is_error(stream_len))
1174
20
      break;
1175
1.90k
    stream_len = encode_value(data_buf[i].fx_variance, model.fx_variance,
1176
1.90k
            stream_len, &setup_fx_var);
1177
1.90k
    if (cmp_is_error(stream_len))
1178
20
      break;
1179
1.88k
    stream_len = encode_value(data_buf[i].cob_x_variance, model.cob_x_variance,
1180
1.88k
            stream_len, &setup_cob_var);
1181
1.88k
    if (cmp_is_error(stream_len))
1182
19
      break;
1183
1.86k
    stream_len = encode_value(data_buf[i].cob_y_variance, model.cob_y_variance,
1184
1.86k
            stream_len, &setup_cob_var);
1185
1.86k
    if (cmp_is_error(stream_len))
1186
19
      break;
1187
1188
1.85k
    if (up_model_buf) {
1189
425
      up_model_buf[i].exp_flags = cmp_up_model32(data_buf[i].exp_flags, model.exp_flags,
1190
425
        cfg->model_value, setup_exp_flag.lossy_par);
1191
425
      up_model_buf[i].fx = cmp_up_model(data_buf[i].fx, model.fx,
1192
425
        cfg->model_value, setup_fx.lossy_par);
1193
425
      up_model_buf[i].ncob_x = cmp_up_model(data_buf[i].ncob_x, model.ncob_x,
1194
425
        cfg->model_value, setup_ncob.lossy_par);
1195
425
      up_model_buf[i].ncob_y = cmp_up_model(data_buf[i].ncob_y, model.ncob_y,
1196
425
        cfg->model_value, setup_ncob.lossy_par);
1197
425
      up_model_buf[i].efx = cmp_up_model(data_buf[i].efx, model.efx,
1198
425
        cfg->model_value, setup_efx.lossy_par);
1199
425
      up_model_buf[i].ecob_x = cmp_up_model(data_buf[i].ecob_x, model.ecob_x,
1200
425
        cfg->model_value, setup_ecob.lossy_par);
1201
425
      up_model_buf[i].ecob_y = cmp_up_model(data_buf[i].ecob_y, model.ecob_y,
1202
425
        cfg->model_value, setup_ecob.lossy_par);
1203
425
      up_model_buf[i].fx_variance = cmp_up_model(data_buf[i].fx_variance, model.fx_variance,
1204
425
        cfg->model_value, setup_fx_var.lossy_par);
1205
425
      up_model_buf[i].cob_x_variance = cmp_up_model(data_buf[i].cob_x_variance, model.cob_x_variance,
1206
425
        cfg->model_value, setup_cob_var.lossy_par);
1207
425
      up_model_buf[i].cob_y_variance = cmp_up_model(data_buf[i].cob_y_variance, model.cob_y_variance,
1208
425
        cfg->model_value, setup_cob_var.lossy_par);
1209
425
    }
1210
1211
1.85k
    if (i >= cfg->samples-1)
1212
300
      break;
1213
1214
1.55k
    model = next_model_p[i];
1215
1.55k
  }
1216
560
  return stream_len;
1217
560
}
1218
1219
1220
/**
1221
 * @brief compress offset data from the normal and fast cameras
1222
 *
1223
 * @param cfg   pointer to the compression configuration structure
1224
 * @param stream_len  already used length of the bitstream in bits
1225
 *
1226
 * @returns the bit length of the bitstream on success or an error code if it
1227
 *  fails (which can be tested with cmp_is_error())
1228
 */
1229
1230
static uint32_t compress_offset(const struct cmp_cfg *cfg, uint32_t stream_len)
1231
1.87k
{
1232
1.87k
  size_t i;
1233
1234
1.87k
  const struct offset *data_buf = cfg->src;
1235
1.87k
  const struct offset *model_buf = cfg->model_buf;
1236
1.87k
  struct offset *up_model_buf = NULL;
1237
1.87k
  const struct offset *next_model_p;
1238
1.87k
  struct offset model;
1239
1.87k
  struct encoder_setup setup_mean, setup_var;
1240
1241
1.87k
  if (model_mode_is_used(cfg->cmp_mode)) {
1242
570
    model = model_buf[0];
1243
570
    next_model_p = &model_buf[1];
1244
570
    up_model_buf = cfg->updated_model_buf;
1245
1.30k
  } else {
1246
1.30k
    memset(&model, 0, sizeof(model));
1247
1.30k
    next_model_p = data_buf;
1248
1.30k
  }
1249
1250
1.87k
  {
1251
1.87k
    unsigned int mean_bits_used, variance_bits_used;
1252
1253
1.87k
    if (cfg->data_type == DATA_TYPE_F_CAM_OFFSET) {
1254
94
      mean_bits_used = MAX_USED_BITS.fc_offset_mean;
1255
94
      variance_bits_used = MAX_USED_BITS.fc_offset_variance;
1256
1.78k
    } else { /* DATA_TYPE_OFFSET */
1257
1.78k
      mean_bits_used = MAX_USED_BITS.nc_offset_mean;
1258
1.78k
      variance_bits_used = MAX_USED_BITS.nc_offset_variance;
1259
1.78k
    }
1260
1261
1.87k
    configure_encoder_setup(&setup_mean, cfg->cmp_par_offset_mean, cfg->spill_offset_mean,
1262
1.87k
          cfg->round, mean_bits_used, cfg);
1263
1.87k
    configure_encoder_setup(&setup_var, cfg->cmp_par_offset_variance, cfg->spill_offset_variance,
1264
1.87k
          cfg->round, variance_bits_used, cfg);
1265
1.87k
  }
1266
1267
2.99k
  for (i = 0;; i++) {
1268
2.99k
    stream_len = encode_value(data_buf[i].mean, model.mean,
1269
2.99k
            stream_len, &setup_mean);
1270
2.99k
    if (cmp_is_error(stream_len))
1271
173
      return stream_len;
1272
2.82k
    stream_len = encode_value(data_buf[i].variance, model.variance,
1273
2.82k
            stream_len, &setup_var);
1274
2.82k
    if (cmp_is_error(stream_len))
1275
443
      return stream_len;
1276
1277
2.37k
    if (up_model_buf) {
1278
346
      up_model_buf[i].mean = cmp_up_model(data_buf[i].mean, model.mean,
1279
346
        cfg->model_value, setup_mean.lossy_par);
1280
346
      up_model_buf[i].variance = cmp_up_model(data_buf[i].variance, model.variance,
1281
346
        cfg->model_value, setup_var.lossy_par);
1282
346
    }
1283
1284
2.37k
    if (i >= cfg->samples-1)
1285
1.25k
      break;
1286
1287
1.12k
    model = next_model_p[i];
1288
1.12k
  }
1289
1.25k
  return stream_len;
1290
1.87k
}
1291
1292
1293
/**
1294
 * @brief compress background data from the normal and fast cameras
1295
 *
1296
 * @param cfg   pointer to the compression configuration structure
1297
 * @param stream_len  already used length of the bitstream in bits
1298
 *
1299
 * @returns the bit length of the bitstream on success or an error code if it
1300
 *  fails (which can be tested with cmp_is_error())
1301
 */
1302
1303
static uint32_t compress_background(const struct cmp_cfg *cfg, uint32_t stream_len)
1304
1.53k
{
1305
1.53k
  size_t i;
1306
1307
1.53k
  const struct background *data_buf = cfg->src;
1308
1.53k
  const struct background *model_buf = cfg->model_buf;
1309
1.53k
  struct background *up_model_buf = NULL;
1310
1.53k
  const struct background *next_model_p;
1311
1.53k
  struct background model;
1312
1.53k
  struct encoder_setup setup_mean, setup_var, setup_pix;
1313
1314
1.53k
  if (model_mode_is_used(cfg->cmp_mode)) {
1315
674
    model = model_buf[0];
1316
674
    next_model_p = &model_buf[1];
1317
674
    up_model_buf = cfg->updated_model_buf;
1318
857
  } else {
1319
857
    memset(&model, 0, sizeof(model));
1320
857
    next_model_p = data_buf;
1321
857
  }
1322
1323
1.53k
  {
1324
1.53k
    unsigned int mean_used_bits, varinace_used_bits, pixels_error_used_bits;
1325
1326
1.53k
    if (cfg->data_type == DATA_TYPE_F_CAM_BACKGROUND) {
1327
314
      mean_used_bits = MAX_USED_BITS.fc_background_mean;
1328
314
      varinace_used_bits = MAX_USED_BITS.fc_background_variance;
1329
314
      pixels_error_used_bits = MAX_USED_BITS.fc_background_outlier_pixels;
1330
1.21k
    } else { /* DATA_TYPE_BACKGROUND */
1331
1.21k
      mean_used_bits = MAX_USED_BITS.nc_background_mean;
1332
1.21k
      varinace_used_bits = MAX_USED_BITS.nc_background_variance;
1333
1.21k
      pixels_error_used_bits = MAX_USED_BITS.nc_background_outlier_pixels;
1334
1.21k
    }
1335
1.53k
    configure_encoder_setup(&setup_mean, cfg->cmp_par_background_mean, cfg->spill_background_mean,
1336
1.53k
          cfg->round, mean_used_bits, cfg);
1337
1.53k
    configure_encoder_setup(&setup_var, cfg->cmp_par_background_variance, cfg->spill_background_variance,
1338
1.53k
          cfg->round, varinace_used_bits, cfg);
1339
1.53k
    configure_encoder_setup(&setup_pix, cfg->cmp_par_background_pixels_error, cfg->spill_background_pixels_error,
1340
1.53k
          cfg->round, pixels_error_used_bits, cfg);
1341
1.53k
  }
1342
1343
2.42k
  for (i = 0;; i++) {
1344
2.42k
    stream_len = encode_value(data_buf[i].mean, model.mean,
1345
2.42k
            stream_len, &setup_mean);
1346
2.42k
    if (cmp_is_error(stream_len))
1347
225
      return stream_len;
1348
2.20k
    stream_len = encode_value(data_buf[i].variance, model.variance,
1349
2.20k
            stream_len, &setup_var);
1350
2.20k
    if (cmp_is_error(stream_len))
1351
425
      return stream_len;
1352
1.77k
    stream_len = encode_value(data_buf[i].outlier_pixels, model.outlier_pixels,
1353
1.77k
            stream_len, &setup_pix);
1354
1.77k
    if (cmp_is_error(stream_len))
1355
71
      return stream_len;
1356
1357
1.70k
    if (up_model_buf) {
1358
346
      up_model_buf[i].mean = cmp_up_model(data_buf[i].mean, model.mean,
1359
346
        cfg->model_value, setup_mean.lossy_par);
1360
346
      up_model_buf[i].variance = cmp_up_model(data_buf[i].variance, model.variance,
1361
346
        cfg->model_value, setup_var.lossy_par);
1362
346
      up_model_buf[i].outlier_pixels = cmp_up_model(data_buf[i].outlier_pixels, model.outlier_pixels,
1363
346
        cfg->model_value, setup_pix.lossy_par);
1364
346
    }
1365
1366
1.70k
    if (i >= cfg->samples-1)
1367
810
      break;
1368
1369
898
    model = next_model_p[i];
1370
898
  }
1371
810
  return stream_len;
1372
1.53k
}
1373
1374
1375
/**
1376
 * @brief compress smearing data from the normal cameras
1377
 *
1378
 * @param cfg   pointer to the compression configuration structure
1379
 * @param stream_len  already used length of the bitstream in bits
1380
 *
1381
 * @returns the bit length of the bitstream on success or an error code if it
1382
 *  fails (which can be tested with cmp_is_error())
1383
 */
1384
1385
static uint32_t compress_smearing(const struct cmp_cfg *cfg, uint32_t stream_len)
1386
1.54k
{
1387
1.54k
  size_t i;
1388
1389
1.54k
  const struct smearing *data_buf = cfg->src;
1390
1.54k
  const struct smearing *model_buf = cfg->model_buf;
1391
1.54k
  struct smearing *up_model_buf = NULL;
1392
1.54k
  const struct smearing *next_model_p;
1393
1.54k
  struct smearing model;
1394
1.54k
  struct encoder_setup setup_mean, setup_var_mean, setup_pix;
1395
1396
1.54k
  if (model_mode_is_used(cfg->cmp_mode)) {
1397
736
    model = model_buf[0];
1398
736
    next_model_p = &model_buf[1];
1399
736
    up_model_buf = cfg->updated_model_buf;
1400
805
  } else {
1401
805
    memset(&model, 0, sizeof(model));
1402
805
    next_model_p = data_buf;
1403
805
  }
1404
1405
1.54k
  configure_encoder_setup(&setup_mean, cfg->cmp_par_smearing_mean, cfg->spill_smearing_mean,
1406
1.54k
        cfg->round, MAX_USED_BITS.smearing_mean, cfg);
1407
1.54k
  configure_encoder_setup(&setup_var_mean, cfg->cmp_par_smearing_variance, cfg->spill_smearing_variance,
1408
1.54k
        cfg->round, MAX_USED_BITS.smearing_variance_mean, cfg);
1409
1.54k
  configure_encoder_setup(&setup_pix, cfg->cmp_par_smearing_pixels_error, cfg->spill_smearing_pixels_error,
1410
1.54k
        cfg->round, MAX_USED_BITS.smearing_outlier_pixels, cfg);
1411
1412
3.50k
  for (i = 0;; i++) {
1413
3.50k
    stream_len = encode_value(data_buf[i].mean, model.mean,
1414
3.50k
            stream_len, &setup_mean);
1415
3.50k
    if (cmp_is_error(stream_len))
1416
150
      return stream_len;
1417
3.35k
    stream_len = encode_value(data_buf[i].variance_mean, model.variance_mean,
1418
3.35k
            stream_len, &setup_var_mean);
1419
3.35k
    if (cmp_is_error(stream_len))
1420
97
      return stream_len;
1421
3.26k
    stream_len = encode_value(data_buf[i].outlier_pixels, model.outlier_pixels,
1422
3.26k
            stream_len, &setup_pix);
1423
3.26k
    if (cmp_is_error(stream_len))
1424
67
      return stream_len;
1425
1426
3.19k
    if (up_model_buf) {
1427
1.45k
      up_model_buf[i].mean = cmp_up_model(data_buf[i].mean, model.mean,
1428
1.45k
        cfg->model_value, setup_mean.lossy_par);
1429
1.45k
      up_model_buf[i].variance_mean = cmp_up_model(data_buf[i].variance_mean, model.variance_mean,
1430
1.45k
        cfg->model_value, setup_var_mean.lossy_par);
1431
1.45k
      up_model_buf[i].outlier_pixels = cmp_up_model(data_buf[i].outlier_pixels, model.outlier_pixels,
1432
1.45k
        cfg->model_value, setup_pix.lossy_par);
1433
1.45k
    }
1434
1435
3.19k
    if (i >= cfg->samples-1)
1436
1.22k
      break;
1437
1438
1.96k
    model = next_model_p[i];
1439
1.96k
  }
1440
1.22k
  return stream_len;
1441
1.54k
}
1442
1443
1444
/**
1445
 * @brief check if two buffers are overlapping
1446
 * @see https://stackoverflow.com/a/325964
1447
 *
1448
 * @param buf_a   start address of the 1st buffer (can be NULL)
1449
 * @param size_a  byte size of the 1st buffer
1450
 * @param buf_b   start address of the 2nd buffer (can be NULL)
1451
 * @param size_b  byte size of the 2nd buffer
1452
 *
1453
 * @returns 0 if the buffers do not overlap, non-zero if they are
1454
 *  overlapping
1455
 */
1456
1457
static int buffer_overlaps(const void *buf_a, size_t size_a,
1458
         const void *buf_b, size_t size_b)
1459
54.7k
{
1460
54.7k
  if (!buf_a)
1461
16.3k
    return 0;
1462
1463
38.3k
  if (!buf_b)
1464
9.54k
    return 0;
1465
1466
28.8k
  if ((const char *)buf_a < (const char *)buf_b + size_b &&
1467
28.8k
      (const char *)buf_b < (const char *)buf_a + size_a)
1468
0
    return 1;
1469
1470
28.8k
  return 0;
1471
28.8k
}
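The test above is the standard half-open interval intersection check: buffers [buf_a, buf_a + size_a) and [buf_b, buf_b + size_b) share at least one byte exactly when each start lies before the other end. A minimal illustration inside this file (the buffer and sizes are made-up values, not taken from the report):

    char buf[64];

    /* [buf, buf+32) and [buf+16, buf+48) share bytes 16..31 -> overlap (non-zero) */
    int a = buffer_overlaps(buf, 32, buf + 16, 32);
    /* [buf, buf+32) and [buf+32, buf+64) touch but share no byte -> no overlap (0) */
    int b = buffer_overlaps(buf, 32, buf + 32, 32);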
1472
1473
1474
/**
1475
 * @brief fill the last part of the bitstream with zeros
1476
 *
1477
 * @param cfg   pointer to the compression configuration structure
1478
 * @param cmp_size  length of the bitstream in bits
1479
 *
1480
 * @returns the bit length of the bitstream on success or an error code if it
1481
 *  fails (which can be tested with cmp_is_error())
1482
 */
1483
1484
static uint32_t pad_bitstream(const struct cmp_cfg *cfg, uint32_t cmp_size)
1485
25.7k
{
1486
25.7k
  unsigned int output_buf_len_bits, n_pad_bits;
1487
1488
25.7k
  if (!cfg->dst)
1489
17.5k
    return cmp_size;
1490
1491
  /* no padding in RAW mode; ALWAYS BIG-ENDIAN */
1492
8.25k
  if (cfg->cmp_mode == CMP_MODE_RAW)
1493
7.35k
    return cmp_size;
1494
1495
  /* maximum length of the bitstream in bits */
1496
901
  output_buf_len_bits = cmp_stream_size_to_bits(cfg->stream_size);
1497
1498
901
  n_pad_bits = 32 - (cmp_size & 0x1FU);
1499
901
  if (n_pad_bits < 32) {
1500
695
    FORWARD_IF_ERROR(put_n_bits32(0, n_pad_bits, cmp_size,
1501
695
         cfg->dst, output_buf_len_bits), "");
1502
695
  }
1503
1504
901
  return cmp_size;
1505
901
}
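A worked example of the padding arithmetic above (illustrative numbers only, not taken from the report):

    uint32_t cmp_size   = 45;                       /* bits already in the stream   */
    uint32_t n_pad_bits = 32 - (cmp_size & 0x1FU);  /* 45 & 31 = 13 -> 32 - 13 = 19 */
    /* put_n_bits32() then appends 19 zero bits: 45 + 19 = 64, a 32-bit boundary.
     * If cmp_size were already a multiple of 32, n_pad_bits would be 32 and the
     * "n_pad_bits < 32" guard skips the padding call entirely.
     */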
1506
1507
1508
/**
1509
 * @brief internal data compression function
1510
 * This function can compress all types of collection data (one at a time).
1511
 * This function does not take the header of a collection into account.
1512
 *
1513
 * @param cfg   pointer to the compression configuration structure
1514
 * @param stream_len  already used length of the bitstream in bits
1515
 *
1516
 * @note the validity of the cfg structure is not checked
1517
 *
1518
 * @returns the bit length of the bitstream on success or an error code if it
1519
 *  fails (which can be tested with cmp_is_error())
1520
 */
1521
1522
static uint32_t compress_data_internal(const struct cmp_cfg *cfg, uint32_t stream_len)
1523
36.0k
{
1524
36.0k
  uint32_t bitsize = 0;
1525
1526
36.0k
  FORWARD_IF_ERROR(stream_len, "");
1527
36.0k
  RETURN_ERROR_IF(cfg == NULL, GENERIC, "");
1528
36.0k
  RETURN_ERROR_IF(stream_len & 0x7, GENERIC, "The stream_len parameter must be a multiple of 8.");
1529
1530
36.0k
  if (cfg->samples == 0) /* nothing to compress, we are done */
1531
4.15k
    return stream_len;
1532
1533
31.8k
  if (raw_mode_is_used(cfg->cmp_mode)) {
1534
15.7k
    uint32_t raw_size = cfg->samples * (uint32_t)size_of_a_sample(cfg->data_type);
1535
1536
15.7k
    if (cfg->dst) {
1537
7.35k
      uint32_t offset_bytes = stream_len >> 3;
1538
7.35k
      uint8_t *p = (uint8_t *)cfg->dst + offset_bytes;
1539
7.35k
      uint32_t new_stream_size = offset_bytes + raw_size;
1540
1541
7.35k
      RETURN_ERROR_IF(new_stream_size > cfg->stream_size, SMALL_BUFFER, "");
1542
7.35k
      memcpy(p, cfg->src, raw_size);
1543
7.35k
      RETURN_ERROR_IF(cpu_to_be_data_type(p, raw_size, cfg->data_type),
1544
7.35k
          INT_DATA_TYPE_UNSUPPORTED, "");
1545
7.35k
    }
1546
15.7k
    bitsize += stream_len + raw_size * 8; /* convert to bits */
1547
16.1k
  } else {
1548
16.1k
    switch (cfg->data_type) {
1549
495
    case DATA_TYPE_IMAGETTE:
1550
495
    case DATA_TYPE_IMAGETTE_ADAPTIVE:
1551
4.22k
    case DATA_TYPE_SAT_IMAGETTE:
1552
4.22k
    case DATA_TYPE_SAT_IMAGETTE_ADAPTIVE:
1553
4.33k
    case DATA_TYPE_F_CAM_IMAGETTE:
1554
4.33k
    case DATA_TYPE_F_CAM_IMAGETTE_ADAPTIVE:
1555
4.33k
      bitsize = compress_imagette(cfg, stream_len);
1556
4.33k
      break;
1557
1558
708
    case DATA_TYPE_S_FX:
1559
708
      bitsize = compress_s_fx(cfg, stream_len);
1560
708
      break;
1561
1.48k
    case DATA_TYPE_S_FX_EFX:
1562
1.48k
      bitsize = compress_s_fx_efx(cfg, stream_len);
1563
1.48k
      break;
1564
1.63k
    case DATA_TYPE_S_FX_NCOB:
1565
1.63k
      bitsize = compress_s_fx_ncob(cfg, stream_len);
1566
1.63k
      break;
1567
857
    case DATA_TYPE_S_FX_EFX_NCOB_ECOB:
1568
857
      bitsize = compress_s_fx_efx_ncob_ecob(cfg, stream_len);
1569
857
      break;
1570
1571
1572
610
    case DATA_TYPE_L_FX:
1573
610
      bitsize = compress_l_fx(cfg, stream_len);
1574
610
      break;
1575
512
    case DATA_TYPE_L_FX_EFX:
1576
512
      bitsize = compress_l_fx_efx(cfg, stream_len);
1577
512
      break;
1578
478
    case DATA_TYPE_L_FX_NCOB:
1579
478
      bitsize = compress_l_fx_ncob(cfg, stream_len);
1580
478
      break;
1581
560
    case DATA_TYPE_L_FX_EFX_NCOB_ECOB:
1582
560
      bitsize = compress_l_fx_efx_ncob_ecob(cfg, stream_len);
1583
560
      break;
1584
1585
1.78k
    case DATA_TYPE_OFFSET:
1586
1.87k
    case DATA_TYPE_F_CAM_OFFSET:
1587
1.87k
      bitsize = compress_offset(cfg, stream_len);
1588
1.87k
      break;
1589
1.21k
    case DATA_TYPE_BACKGROUND:
1590
1.53k
    case DATA_TYPE_F_CAM_BACKGROUND:
1591
1.53k
      bitsize = compress_background(cfg, stream_len);
1592
1.53k
      break;
1593
1.54k
    case DATA_TYPE_SMEARING:
1594
1.54k
      bitsize = compress_smearing(cfg, stream_len);
1595
1.54k
      break;
1596
1597
0
    case DATA_TYPE_F_FX:
1598
0
    case DATA_TYPE_F_FX_EFX:
1599
0
    case DATA_TYPE_F_FX_NCOB:
1600
0
    case DATA_TYPE_F_FX_EFX_NCOB_ECOB:
1601
0
    case DATA_TYPE_CHUNK:
1602
0
    case DATA_TYPE_UNKNOWN:
1603
0
    default:
1604
0
      RETURN_ERROR(INT_DATA_TYPE_UNSUPPORTED, "");
1605
16.1k
    }
1606
16.1k
  }
1607
1608
31.8k
  if (cmp_is_error(bitsize))
1609
6.10k
    return bitsize;
1610
1611
25.7k
  bitsize = pad_bitstream(cfg, bitsize);
1612
1613
25.7k
  return bitsize;
1614
31.8k
}
1615
1616
1617
/**
1618
 * @brief check if the ICU buffer parameters are invalid
1619
 *
1620
 * @param cfg pointer to the compressor configuration to check
1621
 *
1622
 * @returns 0 if the buffer parameters are valid, otherwise an error code
1623
 */
1624
1625
static uint32_t check_compression_buffers(const struct cmp_cfg *cfg)
1626
20.6k
{
1627
20.6k
  size_t data_size;
1628
1629
20.6k
  RETURN_ERROR_IF(cfg == NULL, GENERIC, "");
1630
1631
20.6k
  RETURN_ERROR_IF(cfg->src == NULL, CHUNK_NULL, "");
1632
1633
20.6k
  data_size = size_of_a_sample(cfg->data_type) * cfg->samples;
1634
1635
20.6k
  if (cfg->samples == 0)
1636
2.79k
    debug_print("Warning: The samples parameter is 0. No data are compressed. This behavior may not be intended.");
1637
1638
20.6k
  RETURN_ERROR_IF(buffer_overlaps(cfg->dst, cfg->stream_size,
1639
20.6k
          cfg->src, data_size), PAR_BUFFERS,
1640
20.6k
    "The compressed data buffer and the data to compress buffer are overlapping.");
1641
1642
20.6k
  if (model_mode_is_used(cfg->cmp_mode)) {
1643
8.54k
    RETURN_ERROR_IF(cfg->model_buf == NULL, PAR_NO_MODEL, "");
1644
1645
8.53k
    RETURN_ERROR_IF(buffer_overlaps(cfg->model_buf, data_size,
1646
8.53k
            cfg->src, data_size), PAR_BUFFERS,
1647
8.53k
        "The model buffer and the data to compress buffer are overlapping.");
1648
8.53k
    RETURN_ERROR_IF(buffer_overlaps(cfg->model_buf, data_size,
1649
8.53k
            cfg->dst, cfg->stream_size), PAR_BUFFERS,
1650
8.53k
        "The model buffer and the compressed data buffer are overlapping.");
1651
1652
8.53k
    RETURN_ERROR_IF(buffer_overlaps(cfg->updated_model_buf, data_size,
1653
8.53k
            cfg->src, data_size), PAR_BUFFERS,
1654
8.53k
        "The updated model buffer and the data to compress buffer are overlapping.");
1655
8.53k
    RETURN_ERROR_IF(buffer_overlaps(cfg->updated_model_buf, data_size,
1656
8.53k
            cfg->dst, cfg->stream_size), PAR_BUFFERS,
1657
8.53k
        "The updated model buffer and the compressed data buffer are overlapping.");
1658
8.53k
  }
1659
1660
20.6k
  return CMP_ERROR(NO_ERROR);
1661
20.6k
}
1662
1663
1664
/**
1665
 * @brief checks if the ICU compression configuration is valid
1666
 *
1667
 * @param cfg pointer to the cmp_cfg structure to be validated
1668
 *
1669
 * @returns an error code if any of the configuration parameters are invalid,
1670
 *  otherwise returns CMP_ERROR_NO_ERROR on valid configuration
1671
 */
1672
1673
static uint32_t cmp_cfg_icu_is_invalid_error_code(const struct cmp_cfg *cfg)
1674
20.6k
{
1675
1676
20.6k
  RETURN_ERROR_IF(cmp_cfg_gen_par_is_invalid(cfg), PAR_GENERIC, "");
1677
1678
20.6k
  if (cmp_imagette_data_type_is_used(cfg->data_type))
1679
5.69k
    RETURN_ERROR_IF(cmp_cfg_imagette_is_invalid(cfg), PAR_SPECIFIC, "");
1680
14.9k
  else if (cmp_fx_cob_data_type_is_used(cfg->data_type))
1681
8.10k
    RETURN_ERROR_IF(cmp_cfg_fx_cob_is_invalid(cfg), PAR_SPECIFIC, "");
1682
6.86k
  else
1683
6.86k
    RETURN_ERROR_IF(cmp_cfg_aux_is_invalid(cfg), PAR_SPECIFIC, "");
1684
1685
20.6k
  FORWARD_IF_ERROR(check_compression_buffers(cfg), "");
1686
1687
20.6k
  return CMP_ERROR(NO_ERROR);
1688
20.6k
}
1689
1690
1691
/**
1692
 * @brief calculate the optimal spill threshold value for zero escape mechanism
1693
 *
1694
 * @param golomb_par  Golomb parameter
1695
 * @param max_data_bits maximum number of used data bits
1696
 *
1697
 * @returns the highest optimal spill threshold value for a given Golomb
1698
 *  parameter, when the zero escape mechanism is used or 0 if the
1699
 *  Golomb parameter is not valid
1700
 */
1701
1702
static uint32_t cmp_best_zero_spill(uint32_t golomb_par, uint32_t max_data_bits)
1703
4.66k
{
1704
4.66k
  uint32_t const max_spill = cmp_icu_max_spill(golomb_par);
1705
4.66k
  uint32_t cutoff;
1706
4.66k
  uint32_t spill;
1707
1708
4.66k
  if (golomb_par < MIN_NON_IMA_GOLOMB_PAR)
1709
387
    return 0;
1710
4.27k
  if (golomb_par > MAX_NON_IMA_GOLOMB_PAR)
1711
705
    return 0;
1712
1713
3.57k
  cutoff = (0x2U << ilog_2(golomb_par)) - golomb_par;
1714
3.57k
  spill = max_data_bits * golomb_par + cutoff;
1715
3.57k
  if (spill > max_spill)
1716
3.00k
    spill = max_spill;
1717
1718
3.57k
  return spill;
1719
4.27k
}
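A worked example of the spill-threshold formula above, assuming a Golomb parameter of 4 and 16 used data bits (illustrative values):

    uint32_t golomb_par    = 4;
    uint32_t max_data_bits = 16;
    uint32_t cutoff = (0x2U << 2) - golomb_par;            /* ilog_2(4) = 2, so 8 - 4 = 4 */
    uint32_t spill  = max_data_bits * golomb_par + cutoff; /* 16 * 4 + 4 = 68             */
    /* the result is then capped at cmp_icu_max_spill(golomb_par) if 68 exceeds it */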
1720
1721
1722
/**
1723
 * @brief estimate a "good" spillover threshold parameter
1724
 *
1725
 * @param golomb_par  Golomb parameter
1726
 * @param cmp_mode  compression mode
1727
 * @param max_data_bits maximum number of used data bits
1728
 *
1729
 * @returns a spillover threshold parameter or 0 if the Golomb parameter is not
1730
 *  valid
1731
 */
1732
1733
static uint32_t cmp_get_spill(uint32_t golomb_par, enum cmp_mode cmp_mode,
1734
            uint32_t max_data_bits)
1735
8.11k
{
1736
8.11k
  if (zero_escape_mech_is_used(cmp_mode))
1737
4.66k
    return cmp_best_zero_spill(golomb_par, max_data_bits);
1738
1739
3.44k
  return cmp_icu_max_spill(golomb_par);
1740
8.11k
}
1741
1742
1743
/**
1744
 * @brief set the compressed collection size field
1745
 *
1746
 * @param cmp_col_size_field  pointer to the compressed collection size field
1747
 * @param cmp_col_size    size of the compressed collection (not including
1748
 *        the compressed collection header size and the
1749
 *        size of the compressed collection size field
1750
 *        itself)
1751
 *
1752
 * @returns error code
1753
 */
1754
1755
static uint32_t set_cmp_col_size(uint8_t *cmp_col_size_field, uint32_t cmp_col_size)
1756
7.37k
{
1757
7.37k
  uint16_t const v = cpu_to_be16((uint16_t)cmp_col_size);
1758
1759
7.37k
  RETURN_ERROR_IF(cmp_col_size > UINT16_MAX, INT_CMP_COL_TOO_LARGE,
1760
7.37k
      "%"PRIu32" is bigger than the maximum allowed compression collection size", cmp_col_size);
1761
1762
7.37k
  memcpy(cmp_col_size_field, &v, CMP_COLLECTION_FILD_SIZE);
1763
1764
7.37k
  return 0;
1765
7.37k
}
1766
1767
1768
/**
1769
 * @brief compresses a collection (with a collection header followed by data)
1770
 *
1771
 * @param col   pointer to a collection header
1772
 * @param model   pointer to the model to be used for compression, or NULL
1773
 *      if not applicable
1774
 * @param updated_model pointer to the buffer where the updated model will be
1775
 *      stored, or NULL if not applicable
1776
 * @param dst   pointer to the buffer where the compressed data will be
1777
 *      stored, or NULL to only get the compressed data size
1778
 * @param dst_capacity  the size of the dst buffer in bytes
1779
 * @param cfg   pointer to a compression configuration
1780
 * @param dst_size  the already used size of the dst buffer in bytes
1781
 *
1782
 * @returns the size of the compressed data in bytes (new dst_size) on
1783
 *  success or an error code if it fails (which can be tested with
1784
 *  cmp_is_error())
1785
 */
1786
static uint32_t cmp_collection(const uint8_t *col,
1787
             const uint8_t *model, uint8_t *updated_model,
1788
             uint32_t *dst, uint32_t dst_capacity,
1789
             struct cmp_cfg *cfg, uint32_t dst_size)
1790
20.6k
{
1791
20.6k
  uint32_t const dst_size_begin = dst_size;
1792
20.6k
  uint32_t dst_size_bits;
1793
20.6k
  const struct collection_hdr *col_hdr = (const struct collection_hdr *)col;
1794
20.6k
  uint16_t const col_data_length = cmp_col_get_data_length(col_hdr);
1795
20.6k
  uint16_t sample_size;
1796
1797
  /* sanity check of the collection header */
1798
20.6k
  cfg->data_type = convert_subservice_to_cmp_data_type(cmp_col_get_subservice(col_hdr));
1799
20.6k
  sample_size = (uint16_t)size_of_a_sample(cfg->data_type);
1800
20.6k
  RETURN_ERROR_IF(col_data_length % sample_size, COL_SIZE_INCONSISTENT,
1801
20.6k
      "col_data_length: %u %% sample_size: %u != 0", col_data_length, sample_size);
1802
20.6k
  cfg->samples = col_data_length/sample_size;
1803
1804
  /* prepare the different buffers */
1805
20.6k
  cfg->src = col + COLLECTION_HDR_SIZE;
1806
20.6k
  if (model)
1807
8.76k
    cfg->model_buf = model + COLLECTION_HDR_SIZE;
1808
20.6k
  if (updated_model)
1809
12.9k
    cfg->updated_model_buf = updated_model + COLLECTION_HDR_SIZE;
1810
20.6k
  cfg->dst = dst;
1811
20.6k
  cfg->stream_size = dst_capacity;
1812
20.6k
  FORWARD_IF_ERROR(cmp_cfg_icu_is_invalid_error_code(cfg), "");
1813
1814
20.6k
  if (cfg->cmp_mode != CMP_MODE_RAW) {
1815
    /* here we reserve space for the compressed data size field */
1816
17.9k
    dst_size += CMP_COLLECTION_FILD_SIZE;
1817
17.9k
  }
1818
1819
  /* we do not compress the collection header, we simply copy the header
1820
   * into the compressed data
1821
   */
1822
20.6k
  if (dst) {
1823
8.95k
    RETURN_ERROR_IF(dst_size + COLLECTION_HDR_SIZE > dst_capacity,
1824
8.95k
        SMALL_BUFFER, "");
1825
8.94k
    memcpy((uint8_t *)dst + dst_size, col, COLLECTION_HDR_SIZE);
1826
8.94k
  }
1827
20.6k
  dst_size += COLLECTION_HDR_SIZE;
1828
20.6k
  if (model_mode_is_used(cfg->cmp_mode) && updated_model)
1829
6.16k
    memcpy(updated_model, col, COLLECTION_HDR_SIZE);
1830
1831
  /* is there enough capacity in the dst buffer to store the data uncompressed? */
1832
20.6k
  if ((dst == NULL || dst_capacity >= dst_size + col_data_length) &&
1833
20.6k
      cfg->cmp_mode != CMP_MODE_RAW) {
1834
    /* we set the compressed buffer size to the data size -1 to provoke
1835
     * a CMP_ERROR_SMALL_BUFFER error if the data are not compressible
1836
     */
1837
17.8k
    cfg->stream_size = dst_size + col_data_length - 1;
1838
17.8k
    dst_size_bits = compress_data_internal(cfg, dst_size << 3);
1839
1840
17.8k
    if (cmp_get_error_code(dst_size_bits) == CMP_ERROR_SMALL_BUFFER ||
1841
17.8k
        (!dst && dst_size_bits > cmp_stream_size_to_bits(cfg->stream_size))) { /* if dst == NULL compress_data_internal will not return a CMP_ERROR_SMALL_BUFFER */
1842
      /* can not compress the data with the given parameters;
1843
       * put them uncompressed (raw) into the dst buffer */
1844
15.4k
      enum cmp_mode cmp_mode_cpy = cfg->cmp_mode;
1845
1846
15.4k
      cfg->stream_size = dst_size + col_data_length;
1847
15.4k
      cfg->cmp_mode = CMP_MODE_RAW;
1848
15.4k
      dst_size_bits = compress_data_internal(cfg, dst_size << 3);
1849
15.4k
      cfg->cmp_mode = cmp_mode_cpy;
1850
      /* updated model is in this case a copy of the data to compress */
1851
15.4k
      if (model_mode_is_used(cfg->cmp_mode) && cfg->updated_model_buf)
1852
5.67k
        memcpy(cfg->updated_model_buf, cfg->src, col_data_length);
1853
15.4k
    }
1854
17.8k
  } else {
1855
2.74k
    cfg->stream_size = dst_capacity;
1856
2.74k
    dst_size_bits = compress_data_internal(cfg, dst_size << 3);
1857
2.74k
  }
1858
20.6k
  FORWARD_IF_ERROR(dst_size_bits, "compression failed");
1859
1860
20.5k
  dst_size = cmp_bit_to_byte(dst_size_bits);
1861
20.5k
  if (cfg->cmp_mode != CMP_MODE_RAW && dst) {
1862
7.37k
    uint8_t *cmp_col_size_field = (uint8_t *)dst+dst_size_begin;
1863
7.37k
    uint32_t cmp_col_size = dst_size - dst_size_begin -
1864
7.37k
      COLLECTION_HDR_SIZE - CMP_COLLECTION_FILD_SIZE;
1865
1866
7.37k
    FORWARD_IF_ERROR(set_cmp_col_size(cmp_col_size_field, cmp_col_size), "");
1867
7.37k
  }
1868
1869
20.5k
  return dst_size;
1870
20.5k
}
1871
1872
1873
/**
1874
 * @brief builds a compressed entity header for a compressed chunk
1875
 *
1876
 * @param entity    start address of the compression entity header
1877
 *        (can be NULL if you only want the entity header
1878
 *        size)
1879
 * @param chunk_size    the original size of the chunk in bytes
1880
 * @param cfg     pointer to the compression configuration used to
1881
 *        compress the chunk
1882
 * @param start_timestamp the start timestamp of the chunk compression
1883
 * @param cmp_ent_size_byte the size of the compression entity (entity
1884
 *        header plus compressed data)
1885
 *
1886
 * @return the size of the compressed entity header in bytes or an error code
1887
 *  if it fails (which can be tested with cmp_is_error())
1888
 */
1889
1890
static uint32_t cmp_ent_build_chunk_header(uint32_t *entity, uint32_t chunk_size,
1891
             const struct cmp_cfg *cfg, uint64_t start_timestamp,
1892
             uint32_t cmp_ent_size_byte)
1893
3.12k
{
1894
3.12k
  if (entity) { /* setup the compressed entity header */
1895
442
    struct cmp_entity *ent = (struct cmp_entity *)entity;
1896
442
    int err = 0;
1897
1898
442
    err |= cmp_ent_set_version_id(ent, version_identifier); /* set by compress_chunk_init */
1899
442
    err |= cmp_ent_set_size(ent, cmp_ent_size_byte);
1900
442
    err |= cmp_ent_set_original_size(ent, chunk_size);
1901
442
    err |= cmp_ent_set_data_type(ent, DATA_TYPE_CHUNK, cfg->cmp_mode == CMP_MODE_RAW);
1902
442
    err |= cmp_ent_set_cmp_mode(ent, cfg->cmp_mode);
1903
442
    err |= cmp_ent_set_model_value(ent, cfg->model_value);
1904
    /* model id/counter are set by the user with the compress_chunk_set_model_id_and_counter() */
1905
442
    err |= cmp_ent_set_model_id(ent, 0);
1906
442
    err |= cmp_ent_set_model_counter(ent, 0);
1907
442
    err |= cmp_ent_set_reserved(ent, 0);
1908
442
    err |= cmp_ent_set_lossy_cmp_par(ent, cfg->round);
1909
442
    if (cfg->cmp_mode != CMP_MODE_RAW) {
1910
333
      err |= cmp_ent_set_non_ima_spill1(ent, cfg->spill_par_1);
1911
333
      err |= cmp_ent_set_non_ima_cmp_par1(ent, cfg->cmp_par_1);
1912
333
      err |= cmp_ent_set_non_ima_spill2(ent, cfg->spill_par_2);
1913
333
      err |= cmp_ent_set_non_ima_cmp_par2(ent, cfg->cmp_par_2);
1914
333
      err |= cmp_ent_set_non_ima_spill3(ent, cfg->spill_par_3);
1915
333
      err |= cmp_ent_set_non_ima_cmp_par3(ent, cfg->cmp_par_3);
1916
333
      err |= cmp_ent_set_non_ima_spill4(ent, cfg->spill_par_4);
1917
333
      err |= cmp_ent_set_non_ima_cmp_par4(ent, cfg->cmp_par_4);
1918
333
      err |= cmp_ent_set_non_ima_spill5(ent, cfg->spill_par_5);
1919
333
      err |= cmp_ent_set_non_ima_cmp_par5(ent, cfg->cmp_par_5);
1920
333
      err |= cmp_ent_set_non_ima_spill6(ent, cfg->spill_par_6);
1921
333
      err |= cmp_ent_set_non_ima_cmp_par6(ent, cfg->cmp_par_6);
1922
333
    }
1923
442
    RETURN_ERROR_IF(err, ENTITY_HEADER, "");
1924
201
    RETURN_ERROR_IF(cmp_ent_set_start_timestamp(ent, start_timestamp),
1925
201
        ENTITY_TIMESTAMP, "");
1926
201
    RETURN_ERROR_IF(cmp_ent_set_end_timestamp(ent, get_timestamp()),
1927
201
        ENTITY_TIMESTAMP, "");
1928
201
  }
1929
1930
2.88k
  if (cfg->cmp_mode == CMP_MODE_RAW)
1931
368
    return GENERIC_HEADER_SIZE;
1932
2.51k
  else
1933
2.51k
    return NON_IMAGETTE_HEADER_SIZE;
1934
2.88k
}
1935
1936
1937
/**
1938
 * @brief Set the compression configuration from the compression parameters
1939
 *  based on the chunk type of the collection
1940
 *
1941
 * @param[in] col pointer to a collection header
1942
 * @param[in] par pointer to a compression parameters struct
1943
 * @param[out] cfg  pointer to a compression configuration
1944
 *
1945
 * @returns the chunk type of the collection
1946
 */
1947
1948
static enum chunk_type init_cmp_cfg_from_cmp_par(const struct collection_hdr *col,
1949
             const struct cmp_par *par,
1950
             struct cmp_cfg *cfg)
1951
1.71k
{
1952
1.71k
  enum chunk_type chunk_type = cmp_col_get_chunk_type(col);
1953
1954
1.71k
  memset(cfg, 0, sizeof(struct cmp_cfg));
1955
1956
  /* the ranges of the parameters are checked in cmp_cfg_icu_is_invalid_error_code() */
1957
1.71k
  cfg->cmp_mode = par->cmp_mode;
1958
1.71k
  cfg->model_value = par->model_value;
1959
1.71k
  if (par->lossy_par)
1960
1.07k
    debug_print("Warning: lossy compression is not supported for chunk compression, lossy_par will be ignored.");
1961
1.71k
  cfg->round = 0;
1962
1963
1.71k
  switch (chunk_type) {
1964
80
  case CHUNK_TYPE_NCAM_IMAGETTE:
1965
80
    cfg->cmp_par_imagette = par->nc_imagette;
1966
80
    cfg->spill_imagette = cmp_get_spill(cfg->cmp_par_imagette, cfg->cmp_mode,
1967
80
                MAX_USED_BITS.nc_imagette);
1968
80
    break;
1969
121
  case CHUNK_TYPE_SAT_IMAGETTE:
1970
121
    cfg->cmp_par_imagette = par->saturated_imagette;
1971
121
    cfg->spill_imagette = cmp_get_spill(cfg->cmp_par_imagette, cfg->cmp_mode,
1972
121
                MAX_USED_BITS.saturated_imagette);
1973
121
    break;
1974
493
  case CHUNK_TYPE_SHORT_CADENCE:
1975
493
    cfg->cmp_par_exp_flags = par->s_exp_flags;
1976
493
    cfg->spill_exp_flags = cmp_get_spill(cfg->cmp_par_exp_flags, cfg->cmp_mode,
1977
493
                 MAX_USED_BITS.s_exp_flags);
1978
493
    cfg->cmp_par_fx = par->s_fx;
1979
493
    cfg->spill_fx = cmp_get_spill(cfg->cmp_par_fx, cfg->cmp_mode,
1980
493
                MAX_USED_BITS.s_fx);
1981
493
    cfg->cmp_par_ncob = par->s_ncob;
1982
493
    cfg->spill_ncob = cmp_get_spill(cfg->cmp_par_ncob, cfg->cmp_mode,
1983
493
            MAX_USED_BITS.s_ncob);
1984
493
    cfg->cmp_par_efx = par->s_efx;
1985
493
    cfg->spill_efx = cmp_get_spill(cfg->cmp_par_efx, cfg->cmp_mode,
1986
493
                 MAX_USED_BITS.s_efx);
1987
493
    cfg->cmp_par_ecob = par->s_ecob;
1988
493
    cfg->spill_ecob = cmp_get_spill(cfg->cmp_par_ecob, cfg->cmp_mode,
1989
493
            MAX_USED_BITS.s_ecob);
1990
493
    break;
1991
459
  case CHUNK_TYPE_LONG_CADENCE:
1992
459
    cfg->cmp_par_exp_flags = par->l_exp_flags;
1993
459
    cfg->spill_exp_flags = cmp_get_spill(cfg->cmp_par_exp_flags, cfg->cmp_mode,
1994
459
                 MAX_USED_BITS.l_exp_flags);
1995
459
    cfg->cmp_par_fx = par->l_fx;
1996
459
    cfg->spill_fx = cmp_get_spill(cfg->cmp_par_fx, cfg->cmp_mode,
1997
459
                MAX_USED_BITS.l_fx);
1998
459
    cfg->cmp_par_ncob = par->l_ncob;
1999
459
    cfg->spill_ncob = cmp_get_spill(cfg->cmp_par_ncob, cfg->cmp_mode,
2000
459
            MAX_USED_BITS.l_ncob);
2001
459
    cfg->cmp_par_efx = par->l_efx;
2002
459
    cfg->spill_efx = cmp_get_spill(cfg->cmp_par_efx, cfg->cmp_mode,
2003
459
                 MAX_USED_BITS.l_efx);
2004
459
    cfg->cmp_par_ecob = par->l_ecob;
2005
459
    cfg->spill_ecob = cmp_get_spill(cfg->cmp_par_ecob, cfg->cmp_mode,
2006
459
            MAX_USED_BITS.l_ecob);
2007
459
    cfg->cmp_par_fx_cob_variance = par->l_fx_cob_variance;
2008
459
    cfg->spill_fx_cob_variance = cmp_get_spill(cfg->cmp_par_fx_cob_variance,
2009
459
                 cfg->cmp_mode, MAX_USED_BITS.l_fx_cob_variance);
2010
459
    break;
2011
190
  case CHUNK_TYPE_OFFSET_BACKGROUND:
2012
190
    cfg->cmp_par_offset_mean = par->nc_offset_mean;
2013
190
    cfg->spill_offset_mean = cmp_get_spill(cfg->cmp_par_offset_mean,
2014
190
            cfg->cmp_mode, MAX_USED_BITS.nc_offset_mean);
2015
190
    cfg->cmp_par_offset_variance = par->nc_offset_variance;
2016
190
    cfg->spill_offset_variance = cmp_get_spill(cfg->cmp_par_offset_variance,
2017
190
            cfg->cmp_mode, MAX_USED_BITS.nc_offset_variance);
2018
190
    cfg->cmp_par_background_mean = par->nc_background_mean;
2019
190
    cfg->spill_background_mean = cmp_get_spill(cfg->cmp_par_background_mean,
2020
190
            cfg->cmp_mode, MAX_USED_BITS.nc_background_mean);
2021
190
    cfg->cmp_par_background_variance = par->nc_background_variance;
2022
190
    cfg->spill_background_variance = cmp_get_spill(cfg->cmp_par_background_variance,
2023
190
            cfg->cmp_mode, MAX_USED_BITS.nc_background_variance);
2024
190
    cfg->cmp_par_background_pixels_error = par->nc_background_outlier_pixels;
2025
190
    cfg->spill_background_pixels_error = cmp_get_spill(cfg->cmp_par_background_pixels_error,
2026
190
            cfg->cmp_mode, MAX_USED_BITS.nc_background_outlier_pixels);
2027
190
    break;
2028
2029
165
  case CHUNK_TYPE_SMEARING:
2030
165
    cfg->cmp_par_smearing_mean = par->smearing_mean;
2031
165
    cfg->spill_smearing_mean = cmp_get_spill(cfg->cmp_par_smearing_mean,
2032
165
            cfg->cmp_mode, MAX_USED_BITS.smearing_mean);
2033
165
    cfg->cmp_par_smearing_variance = par->smearing_variance_mean;
2034
165
    cfg->spill_smearing_variance = cmp_get_spill(cfg->cmp_par_smearing_variance,
2035
165
            cfg->cmp_mode, MAX_USED_BITS.smearing_variance_mean);
2036
165
    cfg->cmp_par_smearing_pixels_error = par->smearing_outlier_pixels;
2037
165
    cfg->spill_smearing_pixels_error = cmp_get_spill(cfg->cmp_par_smearing_pixels_error,
2038
165
            cfg->cmp_mode, MAX_USED_BITS.smearing_outlier_pixels);
2039
165
    break;
2040
2041
208
  case CHUNK_TYPE_F_CHAIN:
2042
208
    cfg->cmp_par_imagette = par->fc_imagette;
2043
208
    cfg->spill_imagette = cmp_get_spill(cfg->cmp_par_imagette,
2044
208
            cfg->cmp_mode, MAX_USED_BITS.fc_imagette);
2045
2046
208
    cfg->cmp_par_offset_mean = par->fc_offset_mean;
2047
208
    cfg->spill_offset_mean = cmp_get_spill(cfg->cmp_par_offset_mean,
2048
208
            cfg->cmp_mode, MAX_USED_BITS.fc_offset_mean);
2049
208
    cfg->cmp_par_offset_variance = par->fc_offset_variance;
2050
208
    cfg->spill_offset_variance = cmp_get_spill(cfg->cmp_par_offset_variance,
2051
208
            cfg->cmp_mode, MAX_USED_BITS.fc_offset_variance);
2052
2053
208
    cfg->cmp_par_background_mean = par->fc_background_mean;
2054
208
    cfg->spill_background_mean = cmp_get_spill(cfg->cmp_par_background_mean,
2055
208
            cfg->cmp_mode, MAX_USED_BITS.fc_background_mean);
2056
208
    cfg->cmp_par_background_variance = par->fc_background_variance;
2057
208
    cfg->spill_background_variance = cmp_get_spill(cfg->cmp_par_background_variance,
2058
208
            cfg->cmp_mode, MAX_USED_BITS.fc_background_variance);
2059
208
    cfg->cmp_par_background_pixels_error = par->fc_background_outlier_pixels;
2060
208
    cfg->spill_background_pixels_error = cmp_get_spill(cfg->cmp_par_background_pixels_error,
2061
208
            cfg->cmp_mode, MAX_USED_BITS.fc_background_outlier_pixels);
2062
208
    break;
2063
1
  case CHUNK_TYPE_UNKNOWN:
2064
1
  default: /*
2065
      * default case never reached because cmp_col_get_chunk_type
2066
      * returns CHUNK_TYPE_UNKNOWN if the type is unknown
2067
      */
2068
1
    chunk_type = CHUNK_TYPE_UNKNOWN;
2069
1
    break;
2070
1.71k
  }
2071
2072
1.71k
  return chunk_type;
2073
1.71k
}
2074
2075
2076
/**
2077
 * @brief initialise the compress_chunk() function
2078
 *
2079
 * If not initialised, the compress_chunk() function sets the timestamps and
2080
 * version_id in the compression entity header to zero
2081
 *
2082
 * @param return_timestamp  pointer to a function returning a current 48-bit
2083
 *        timestamp
2084
 * @param version_id    application software version identifier
2085
 */
2086
2087
void compress_chunk_init(uint64_t (*return_timestamp)(void), uint32_t version_id)
2088
0
{
2089
0
  if (return_timestamp)
2090
0
    get_timestamp = return_timestamp;
2091
2092
0
  version_identifier = version_id;
2093
0
}
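A minimal initialisation sketch. The timestamp callback and the version word are assumptions made for illustration; in flight software the callback would wrap the on-board time source (the public declarations are assumed to come from cmp_chunk.h):

    #include <stdint.h>
    #include "cmp_chunk.h"

    /* hypothetical timestamp source; must return the current 48-bit PLATO time */
    static uint64_t my_timestamp(void)
    {
            return 0x0123456789ABULL;       /* example value, fits in 48 bits */
    }

    static void setup_chunk_compression(void)
    {
            compress_chunk_init(my_timestamp, 0x00010203); /* example ASW version id */
    }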
2094
2095
2096
/**
2097
 * @brief compress a data chunk consisting of concatenated data collections
2098
 *
2099
 * @param chunk     pointer to the chunk to be compressed
2100
 * @param chunk_size    byte size of the chunk
2101
 * @param chunk_model   pointer to a model of a chunk; has the same size
2102
 *        as the chunk (can be NULL if no model compression
2103
 *        mode is used)
2104
 * @param updated_chunk_model pointer to store the updated model for the next
2105
 *        model mode compression; has the same size as the
2106
 *        chunk (can be the same as the chunk_model
2107
 *        buffer for in-place update or NULL if updated
2108
 *        model is not needed)
2109
 * @param dst     destination pointer to the compressed data
2110
 *        buffer; has to be 4-byte aligned; can be NULL to
2111
 *        only get the compressed data size
2112
 * @param dst_capacity    capacity of the dst buffer; it's recommended to
2113
 *        provide a dst_capacity >=
2114
 *        compress_chunk_cmp_size_bound(chunk, chunk_size)
2115
 *        as it eliminates one potential failure scenario:
2116
 *        not enough space in the dst buffer to write the
2117
 *        compressed data; size is internally rounded down
2118
 *        to a multiple of 4
2119
 * @param cmp_par   pointer to a compression parameters struct
2120
 * @returns the byte size of the compressed data or an error code if it
2121
 *  fails (which can be tested with cmp_is_error())
2122
 */
2123
2124
uint32_t compress_chunk(const void *chunk, uint32_t chunk_size,
2125
      const void *chunk_model, void *updated_chunk_model,
2126
      uint32_t *dst, uint32_t dst_capacity,
2127
      const struct cmp_par *cmp_par)
2128
1.84k
{
2129
1.84k
  uint64_t const start_timestamp = get_timestamp();
2130
1.84k
  const struct collection_hdr *col = (const struct collection_hdr *)chunk;
2131
1.84k
  enum chunk_type chunk_type;
2132
1.84k
  struct cmp_cfg cfg;
2133
1.84k
  uint32_t cmp_size_byte; /* size of the compressed data in bytes */
2134
1.84k
  size_t read_bytes;
2135
2136
1.84k
  RETURN_ERROR_IF(chunk == NULL, CHUNK_NULL, "");
2137
1.84k
  RETURN_ERROR_IF(cmp_par == NULL, PAR_NULL, "");
2138
1.72k
  RETURN_ERROR_IF(chunk_size < COLLECTION_HDR_SIZE, CHUNK_SIZE_INCONSISTENT,
2139
1.72k
      "chunk_size: %"PRIu32"", chunk_size);
2140
1.71k
  RETURN_ERROR_IF(chunk_size > CMP_ENTITY_MAX_ORIGINAL_SIZE, CHUNK_TOO_LARGE,
2141
1.71k
      "chunk_size: %"PRIu32"", chunk_size);
2142
2143
1.71k
  chunk_type = init_cmp_cfg_from_cmp_par(col, cmp_par, &cfg);
2144
1.71k
  RETURN_ERROR_IF(chunk_type == CHUNK_TYPE_UNKNOWN, COL_SUBSERVICE_UNSUPPORTED,
2145
1.71k
      "unsupported subservice: %u", cmp_col_get_subservice(col));
2146
2147
  /* reserve space for the compression entity header, we will build the
2148
   * header after the compression of the chunk
2149
   */
2150
1.71k
  cmp_size_byte = cmp_ent_build_chunk_header(NULL, chunk_size, &cfg, start_timestamp, 0);
2151
1.71k
  RETURN_ERROR_IF(dst && dst_capacity < cmp_size_byte, SMALL_BUFFER,
2152
1.71k
      "dst_capacity must be at least as large as the minimum size of the compression unit.");
2153
2154
2155
  /* compress one collection after another */
2156
1.71k
  for (read_bytes = 0;
2157
22.2k
       read_bytes <= chunk_size - COLLECTION_HDR_SIZE;
2158
20.7k
       read_bytes += cmp_col_get_size(col)) {
2159
20.7k
    const uint8_t *col_model = NULL;
2160
20.7k
    uint8_t *col_up_model = NULL;
2161
2162
    /* setup pointers for the next collection we want to compress */
2163
20.7k
    col = (const struct collection_hdr *)((const uint8_t *)chunk + read_bytes);
2164
20.7k
    if (chunk_model)
2165
8.78k
      col_model = (const uint8_t *)chunk_model + read_bytes;
2166
20.7k
    if (updated_chunk_model)
2167
12.9k
      col_up_model = (uint8_t *)updated_chunk_model + read_bytes;
2168
2169
20.7k
    RETURN_ERROR_IF(cmp_col_get_chunk_type(col) != chunk_type, CHUNK_SUBSERVICE_INCONSISTENT, "");
2170
2171
    /* chunk size is inconsistent with the sum of sizes in the collection headers */
2172
20.6k
    if (read_bytes + cmp_col_get_size(col) > chunk_size)
2173
12
      break;
2174
2175
20.6k
    cmp_size_byte = cmp_collection((const uint8_t *)col, col_model, col_up_model,
2176
20.6k
                 dst, dst_capacity, &cfg, cmp_size_byte);
2177
20.6k
    FORWARD_IF_ERROR(cmp_size_byte, "error occurred when compressing the collection with offset %u", read_bytes);
2178
20.6k
  }
2179
2180
1.47k
  RETURN_ERROR_IF(read_bytes != chunk_size, CHUNK_SIZE_INCONSISTENT, "");
2181
2182
1.41k
  FORWARD_IF_ERROR(cmp_ent_build_chunk_header(dst, chunk_size, &cfg,
2183
1.41k
              start_timestamp, cmp_size_byte), "");
2184
2185
1.17k
  return cmp_size_byte;
2186
1.41k
}
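A usage sketch for this entry point, assuming the declarations from cmp_chunk.h. Only CMP_MODE_RAW is visible in this listing, so the CMP_MODE_DIFF_ZERO enumerator, the cmp_par field choices and the CMP_ERROR(GENERIC) return on allocation failure are assumptions made for illustration:

    #include <stdint.h>
    #include <stdlib.h>
    #include <string.h>
    #include "cmp_chunk.h"

    static uint32_t compress_one_chunk(const void *chunk, uint32_t chunk_size,
                                       uint32_t **dst_out)
    {
            struct cmp_par par;
            uint32_t bound, cmp_size;
            uint32_t *dst;

            memset(&par, 0, sizeof(par));
            par.cmp_mode    = CMP_MODE_DIFF_ZERO; /* assumed 1-d mode, no model buffers needed */
            par.nc_imagette = 4;                  /* example Golomb parameter */

            bound = compress_chunk_cmp_size_bound(chunk, chunk_size);
            if (cmp_is_error(bound))
                    return bound;

            dst = malloc(bound);                  /* malloc returns suitably aligned memory */
            if (!dst)
                    return CMP_ERROR(GENERIC);    /* assumed error constructor, see above */

            cmp_size = compress_chunk(chunk, chunk_size, NULL, NULL, dst, bound, &par);
            if (cmp_is_error(cmp_size)) {
                    free(dst);
                    return cmp_size;
            }

            *dst_out = dst;
            return cmp_size;                      /* byte size of the compression entity */
    }

When a model mode is used instead, chunk_model and updated_chunk_model carry the model buffers, and compress_chunk_set_model_id_and_counter() can stamp the model id and counter into the entity header afterwards.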
2187
2188
2189
/**
2190
 * @brief returns the maximum compressed size in the worst-case scenario,
2191
 *  i.e. when the input data is not compressible.
2192
 * This function is primarily useful for memory allocation purposes
2193
 * (destination buffer size).
2194
 *
2195
 * @note if the number of collections is known you can use the
2196
 *  COMPRESS_CHUNK_BOUND macro for compilation-time evaluation
2197
 *  (stack memory allocation for example)
2198
 *
2199
 * @param chunk   pointer to the chunk you want to compress
2200
 * @param chunk_size  size of the chunk in bytes
2201
 *
2202
 * @returns maximum compressed size for a chunk compression on success or an
2203
 *  error code if it fails (which can be tested with cmp_is_error())
2204
 */
2205
2206
uint32_t compress_chunk_cmp_size_bound(const void *chunk, size_t chunk_size)
2207
1.38k
{
2208
1.38k
  int32_t read_bytes;
2209
1.38k
  uint32_t num_col = 0;
2210
1.38k
  size_t bound;
2211
1.38k
  size_t const max_chunk_size = CMP_ENTITY_MAX_ORIGINAL_SIZE
2212
1.38k
    - NON_IMAGETTE_HEADER_SIZE - CMP_COLLECTION_FILD_SIZE;
2213
2214
1.38k
  RETURN_ERROR_IF(chunk == NULL, CHUNK_NULL, "");
2215
1.38k
  RETURN_ERROR_IF(chunk_size < COLLECTION_HDR_SIZE, CHUNK_SIZE_INCONSISTENT, "");
2216
1.26k
  RETURN_ERROR_IF(chunk_size > max_chunk_size, CHUNK_TOO_LARGE,
2217
1.26k
      "chunk_size: %"PRIu32" > max_chunk_size: %"PRIu32"",
2218
1.26k
      chunk_size, max_chunk_size);
2219
2220
  /* count the number of collections in the chunk */
2221
1.26k
  for (read_bytes = 0;
2222
15.9k
       read_bytes <= (int32_t)(chunk_size-COLLECTION_HDR_SIZE);
2223
14.7k
       read_bytes += cmp_col_get_size((const struct collection_hdr *)
2224
14.7k
              ((const uint8_t *)chunk + read_bytes)))
2225
14.7k
    num_col++;
2226
2227
1.26k
  RETURN_ERROR_IF((uint32_t)read_bytes != chunk_size, CHUNK_SIZE_INCONSISTENT, "");
2228
2229
1.12k
  bound = COMPRESS_CHUNK_BOUND_UNSAFE(chunk_size, num_col);
2230
1.12k
  RETURN_ERROR_IF(bound > CMP_ENTITY_MAX_SIZE, CHUNK_TOO_LARGE, "bound: %lu", bound);
2231
2232
1.12k
  return (uint32_t)bound;
2233
1.12k
}
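For the compile-time variant mentioned in the note above, a stack/static allocation sketch; the argument order of COMPRESS_CHUNK_BOUND is assumed to mirror the COMPRESS_CHUNK_BOUND_UNSAFE(chunk_size, num_col) call used in the function body:

    /* worst-case buffer for a chunk of 3 collections and 4096 bytes (example numbers);
     * a uint32_t array keeps the 4-byte alignment required by compress_chunk()
     */
    static uint32_t dst[(COMPRESS_CHUNK_BOUND(4096, 3) + 3) / 4];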
2234
2235
2236
/**
2237
 * @brief set the model id and model counter in the compression entity header
2238
 *
2239
 * @param dst   pointer to the compressed data (starting with a
2240
 *      compression entity header)
2241
 * @param dst_size  byte size of the dst buffer
2242
 * @param model_id  model identifier; for identifying entities that originate
2243
 *      from the same starting model
2244
 * @param model_counter model_counter; counts how many times the model was
2245
 *      updated; for non model mode compression use 0
2246
 *
2247
 * @returns the byte size of the dst buffer (= dst_size) on success or an error
2248
 *  code if it fails (which can be tested with cmp_is_error())
2249
 */
2250
2251
uint32_t compress_chunk_set_model_id_and_counter(void *dst, uint32_t dst_size,
2252
             uint16_t model_id, uint8_t model_counter)
2253
0
{
2254
0
  RETURN_ERROR_IF(dst == NULL, ENTITY_NULL, "");
2255
0
  FORWARD_IF_ERROR(dst_size, "");
2256
0
  RETURN_ERROR_IF(dst_size < GENERIC_HEADER_SIZE, ENTITY_TOO_SMALL,
2257
0
      "dst_size: %"PRIu32"", dst_size);
2258
2259
0
  cmp_ent_set_model_id(dst, model_id);
2260
0
  cmp_ent_set_model_counter(dst, model_counter);
2261
2262
0
  return dst_size;
2263
0
}
2264
2265
2266
/**
2267
 * @brief compress data the same way as the RDCU HW compressor
2268
 *
2269
 * @param rcfg  pointer to a RDCU compression configuration (created with the
2270
 *    rdcu_cfg_create() function, set up with the rdcu_cfg_buffers()
2271
 *    and rdcu_cfg_imagette() functions)
2272
 * @param info  pointer to a compression information structure that contains the
2273
 *    metadata of a compression (can be NULL)
2274
 *
2275
 * @returns the bit length of the bitstream on success or an error code if it
2276
 *  fails (which can be tested with cmp_is_error())
2277
 *
2278
 * @warning only the small buffer error in the info.cmp_err field is implemented
2279
 */
2280
2281
uint32_t compress_like_rdcu(const struct rdcu_cfg *rcfg, struct cmp_info *info)
2282
0
{
2283
0
  struct cmp_cfg cfg;
2284
0
  uint32_t cmp_size_bit;
2285
2286
0
  memset(&cfg, 0, sizeof(cfg));
2287
2288
0
  if (info)
2289
0
    memset(info, 0, sizeof(*info));
2290
2291
0
  if (!rcfg)
2292
0
    return compress_data_internal(NULL, 0);
2293
2294
0
  cfg.data_type = DATA_TYPE_IMAGETTE;
2295
2296
0
  cfg.src = rcfg->input_buf;
2297
0
  cfg.model_buf = rcfg->model_buf;
2298
0
  cfg.samples = rcfg->samples;
2299
0
  cfg.stream_size = (rcfg->buffer_length * sizeof(uint16_t));
2300
0
  cfg.cmp_mode = rcfg->cmp_mode;
2301
0
  cfg.model_value = rcfg->model_value;
2302
0
  cfg.round = rcfg->round;
2303
2304
0
  if (info) {
2305
0
    info->cmp_err = 0;
2306
0
    info->cmp_mode_used = (uint8_t)rcfg->cmp_mode;
2307
0
    info->model_value_used = (uint8_t)rcfg->model_value;
2308
0
    info->round_used = (uint8_t)rcfg->round;
2309
0
    info->spill_used = rcfg->spill;
2310
0
    info->golomb_par_used = rcfg->golomb_par;
2311
0
    info->samples_used = rcfg->samples;
2312
0
    info->rdcu_new_model_adr_used = rcfg->rdcu_new_model_adr;
2313
0
    info->rdcu_cmp_adr_used = rcfg->rdcu_buffer_adr;
2314
0
    info->cmp_size = 0;
2315
0
    info->ap1_cmp_size = 0;
2316
0
    info->ap2_cmp_size = 0;
2317
2318
0
    cfg.cmp_par_imagette = rcfg->ap1_golomb_par;
2319
0
    cfg.spill_imagette = rcfg->ap1_spill;
2320
0
    if (cfg.cmp_par_imagette &&
2321
0
        cmp_cfg_icu_is_invalid_error_code(&cfg) == CMP_ERROR_NO_ERROR)
2322
0
      info->ap1_cmp_size = compress_data_internal(&cfg, 0);
2323
2324
2325
0
    cfg.cmp_par_imagette = rcfg->ap2_golomb_par;
2326
0
    cfg.spill_imagette = rcfg->ap2_spill;
2327
0
    if (cfg.cmp_par_imagette &&
2328
0
        cmp_cfg_icu_is_invalid_error_code(&cfg) == CMP_ERROR_NO_ERROR)
2329
0
      info->ap2_cmp_size = compress_data_internal(&cfg, 0);
2330
0
  }
2331
2332
0
  cfg.cmp_par_imagette = rcfg->golomb_par;
2333
0
  cfg.spill_imagette = rcfg->spill;
2334
0
  cfg.updated_model_buf = rcfg->icu_new_model_buf;
2335
0
  cfg.dst = rcfg->icu_output_buf;
2336
2337
0
  FORWARD_IF_ERROR(cmp_cfg_icu_is_invalid_error_code(&cfg), "");
2338
2339
0
  cmp_size_bit = compress_data_internal(&cfg, 0);
2340
2341
0
  if (info) {
2342
0
    if (cmp_get_error_code(cmp_size_bit) == CMP_ERROR_SMALL_BUFFER)
2343
0
      info->cmp_err |= 1UL << 0;/* SMALL_BUFFER_ERR_BIT;*/ /* set small buffer error */
2344
0
    if (cmp_is_error(cmp_size_bit)) {
2345
0
      info->cmp_size = 0;
2346
0
      info->ap1_cmp_size = 0;
2347
0
      info->ap2_cmp_size = 0;
2348
0
    } else {
2349
0
      info->cmp_size = cmp_size_bit;
2350
0
    }
2351
0
  }
2352
2353
0
  return cmp_size_bit;
2354
0
}